repo_name | path | copies | size | content | license
---|---|---|---|---|---|
fergaljd/pep_ga
|
chromosomes/base_chromosome.py
|
1
|
2239
|
import random
def basic_mutation(sequence, mutation_rate, building_blocks):
"""
An amino acid is randomly mutated into any other amino acid.
"""
mutated_sequence = []
for element in sequence:
if random.random() < mutation_rate:
new_element = random.choice(building_blocks)
# Redraw until the replacement differs from the original (assumes
# building_blocks has more than one element, otherwise this loops forever).
while new_element == element:
new_element = random.choice(building_blocks)
mutated_sequence.append(new_element)
else:
mutated_sequence.append(element)
return mutated_sequence
class GAChromosome(object):
"""
Base chromosome class for use in the genetic algorithm.
"""
def __init__(self,
sequence,
mutation_rate,
mutation_function=basic_mutation,
elements=()):  # immutable default avoids Python's shared mutable-default pitfall
self._sequence = tuple(sequence)
self.fitness = None
self.mutation_rate = mutation_rate
self._mutation_function = mutation_function
self.elements = elements
@property
def sequence(self):
return self._sequence
@sequence.setter
def sequence(self, new_sequence):
self._sequence = tuple(new_sequence)
@classmethod
def random_chromosome(cls,
length,
mutation_rate,
mutation_function=basic_mutation,
elements=()):  # immutable default, as in __init__
return cls([random.choice(elements) for _ in range(length)],
mutation_rate,
mutation_function,
elements)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.sequence == other.sequence
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def mutate(self):
self.sequence = self._mutation_function(self.sequence,
self.mutation_rate,
self.elements)
@property
def seq_as_string(self):
return "-".join(self._sequence)
@property
def idx_name(self):
return '-'.join([str(self.elements.index(s)) for s in self.sequence])
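# Illustrative usage sketch, not part of the original module; the amino-acid
# alphabet below is a hypothetical example.
if __name__ == '__main__':
    blocks = list("ACDEFGHIKLMNPQRSTVWY")
    chrom = GAChromosome.random_chromosome(8, 0.1, elements=blocks)
    print(chrom.seq_as_string)
    chrom.mutate()
    print(chrom.seq_as_string)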
|
gpl-2.0
|
kevinastone/sentry
|
src/sentry/utils/email.py
|
14
|
6441
|
"""
sentry.utils.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import toronado
from django.conf import settings
from django.core.mail import get_connection, EmailMultiAlternatives
from django.core.signing import Signer
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from email.utils import parseaddr
from sentry.web.helpers import render_to_string
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
signer = Signer()
SMTP_HOSTNAME = getattr(settings, 'SENTRY_SMTP_HOSTNAME', 'localhost')
ENABLE_EMAIL_REPLIES = getattr(settings, 'SENTRY_ENABLE_EMAIL_REPLIES', False)
def email_to_group_id(address):
"""
Email address should be in the form of:
{group_id}+{signature}@example.com
"""
address = address.split('@', 1)[0]
signed_data = address.replace('+', ':')
return int(force_bytes(signer.unsign(signed_data)))
def group_id_to_email(group_id):
signed_data = signer.sign(str(group_id))
return '@'.join((signed_data.replace(':', '+'), SMTP_HOSTNAME))
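# Illustrative round trip, not part of the original module; the signature
# placeholder below is hypothetical and depends on the configured SECRET_KEY:
#
#   group_id_to_email(42)                          # '42+<signature>@localhost'
#   email_to_group_id('42+<signature>@localhost')  # 42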
def email_id_for_model(model):
return '<%s/%s@%s>' % (type(model).__name__.lower(), model.pk, FROM_EMAIL_DOMAIN)
def domain_from_email(email):
email = parseaddr(email)[1]
try:
return email.split('@', 1)[1]
except IndexError:
# The email address is likely malformed or something
return email
FROM_EMAIL_DOMAIN = domain_from_email(settings.DEFAULT_FROM_EMAIL)
class MessageBuilder(object):
def __init__(self, subject, context=None, template=None, html_template=None,
body=None, html_body=None, headers=None, reference=None,
reply_reference=None):
assert not (body and template)
assert not (html_body and html_template)
assert context or not (template or html_template)
self.subject = subject
self.context = context or {}
self.template = template
self.html_template = html_template
self._txt_body = body
self._html_body = html_body
self.headers = headers
self.reference = reference # The object that generated this message
self.reply_reference = reply_reference # The object this message is replying about
self._send_to = set()
@cached_property
def html_body(self):
html_body = None
if self.html_template:
html_body = render_to_string(self.html_template, self.context)
else:
html_body = self._html_body
if html_body is not None:
return inline_css(html_body)
@cached_property
def txt_body(self):
if self.template:
return render_to_string(self.template, self.context)
return self._txt_body
@cached_property
def message_id(self):
if self.reference is not None:
return email_id_for_model(self.reference)
@cached_property
def reply_to_id(self):
if self.reply_reference is not None:
return email_id_for_model(self.reply_reference)
def add_users(self, user_ids, project=None):
from sentry.models import User, UserOption
email_list = set()
user_ids = set(user_ids)
# XXX: It's possible that options have been set to an empty value
if project:
queryset = UserOption.objects.filter(
project=project,
user__in=user_ids,
key='mail:email',
)
for option in (o for o in queryset if o.value):
user_ids.remove(option.user_id)
email_list.add(option.value)
if user_ids:
queryset = UserOption.objects.filter(
user__in=user_ids,
key='alert_email',
)
for option in (o for o in queryset if o.value):
try:
user_ids.remove(option.user_id)
email_list.add(option.value)
except KeyError:
# options.user_id might not exist in user_ids set
pass
if user_ids:
email_list |= set(filter(bool, User.objects.filter(
pk__in=user_ids, is_active=True,
).values_list('email', flat=True)))
self._send_to.update(email_list)
def build(self, to, reply_to=()):
if self.headers is None:
headers = {}
else:
headers = self.headers.copy()
if ENABLE_EMAIL_REPLIES and 'X-Sentry-Reply-To' in headers:
reply_to = headers['X-Sentry-Reply-To']
else:
reply_to = set(reply_to)
reply_to.discard(to)  # discard() does not raise when `to` is absent from the set
reply_to = ', '.join(reply_to)
if reply_to:
headers.setdefault('Reply-To', reply_to)
if self.message_id is not None:
headers.setdefault('Message-Id', self.message_id)
subject = self.subject
if self.reply_to_id is not None:
headers.setdefault('In-Reply-To', self.reply_to_id)
headers.setdefault('References', self.reply_to_id)
subject = 'Re: %s' % subject
msg = EmailMultiAlternatives(
subject,
self.txt_body,
settings.SERVER_EMAIL,
(to,),
headers=headers
)
if self.html_body:
msg.attach_alternative(self.html_body, 'text/html')
return msg
def get_built_messages(self, to=None):
send_to = set(to or ())
send_to.update(self._send_to)
return [self.build(to=email, reply_to=send_to) for email in send_to]
def send(self, to=None, fail_silently=False):
messages = self.get_built_messages(to)
self.send_all(messages, fail_silently=fail_silently)
def send_all(self, messages, fail_silently=False):
connection = get_connection(fail_silently=fail_silently)
metrics.incr('email.sent', len(messages))
return connection.send_messages(messages)
def send_async(self, to=None):
from sentry.tasks.email import send_email
messages = self.get_built_messages(to)
for message in messages:
safe_execute(send_email.delay, message=message)
def inline_css(html):
return toronado.from_string(html)
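# Illustrative usage sketch, not part of the original module; it assumes a
# configured Django/Sentry environment and hypothetical `user` and `project`
# objects:
#
#   msg = MessageBuilder(
#       subject='Test notification',
#       body='Something happened.',
#       html_body='<p>Something happened.</p>',
#   )
#   msg.add_users([user.id], project=project)  # resolve recipient addresses
#   msg.send_async()                           # hand off to the Celery task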
|
bsd-3-clause
|
michellab/SireUnitTests
|
unittests/SireMM/test_extract.py
|
1
|
2046
|
from Sire.Mol import *
from Sire.MM import *
from Sire.IO import *
from nose.tools import assert_almost_equal
(molecules, space) = Amber().readCrdTop("../io/proteinbox.crd", "../io/proteinbox.top")
molnums = molecules.molNums()
for molnum in molnums:
protein = molecules[molnum].molecule()
if protein.nAtoms() > 200:
break
residues = protein.selectAll( ResIdx(1) )
for i in range(2,11):
residues = residues.add( ResIdx(i) )
partial = PartialMolecule(residues)
extract = partial.extract()
def test_internal_different(verbose=False):
internal1 = InternalFF("1")
internal2 = InternalFF("2")
internal1.add(protein)
internal2.add(extract)
if verbose:
print("WHOLE: %s EXTRACT: %s (should be different!)" % (internal1.energy(),internal2.energy()))
assert( internal1.energy() != internal2.energy() )
def test_internal_same(verbose=False):
internal1 = InternalFF("1")
internal2 = InternalFF("2")
internal1.add(partial)
internal1.setStrict(True)
internal2.add(extract)
if verbose:
print("WHOLE: %s EXTRACT: %s (should be same!)" % (internal1.energy(),internal2.energy()))
assert_almost_equal( internal1.energy().value(), internal2.energy().value(), 1 )
def test_intra_different(verbose=False):
intra1 = IntraCLJFF("1")
intra2 = IntraCLJFF("2")
intra1.add(protein)
intra2.add(extract)
if verbose:
print("WHOLE: %s EXTRACT: %s (should be different!)" % (intra1.energy(),intra2.energy()))
assert( intra1.energy() != intra2.energy() )
def test_intra_same(verbose=False):
intra1 = IntraCLJFF("1")
intra2 = IntraCLJFF("2")
intra1.add(partial)
intra2.add(extract)
if verbose:
print("WHOLE: %s EXTRACT: %s (should be same!)" % (intra1.energy(),intra2.energy()))
assert_almost_equal( intra1.energy().value(), intra2.energy().value(), 1 )
if __name__ == "__main__":
test_internal_different(True)
test_internal_same(True)
test_intra_different(True)
test_intra_same(True)
|
gpl-3.0
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/pbr/tests/test_commands.py
|
33
|
3688
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from testtools import content
from pbr.tests import base
class TestCommands(base.BaseTestCase):
def test_custom_build_py_command(self):
"""Test custom build_py command.
Test that a custom subclass of the build_py command runs when listed in
the commands [global] option, rather than the normal build command.
"""
stdout, stderr, return_code = self.run_setup('build_py')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Running custom build_py command.', stdout)
self.assertEqual(0, return_code)
def test_custom_deb_version_py_command(self):
"""Test custom deb_version command."""
stdout, stderr, return_code = self.run_setup('deb_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting deb version', stdout)
self.assertEqual(0, return_code)
def test_custom_rpm_version_py_command(self):
"""Test custom rpm_version command."""
stdout, stderr, return_code = self.run_setup('rpm_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting rpm version', stdout)
self.assertEqual(0, return_code)
def test_freeze_command(self):
"""Test that freeze output is sorted in a case-insensitive manner."""
stdout, stderr, return_code = self.run_pbr('freeze')
self.assertEqual(0, return_code)
pkgs = []
for l in stdout.split('\n'):
pkgs.append(l.split('==')[0].lower())
pkgs_sort = sorted(pkgs[:])
self.assertEqual(pkgs_sort, pkgs)
|
mit
|
sccn/SNAP
|
src/rpyc/__init__.py
|
2
|
2209
|
"""
::
##### ##### ####
## ## ## ## ## ####
## ## ## ## ## #
##### ##### ## ## ## ##
## ## ## ## ## ## #
## ## ## ### ## ###
## ## ## ## #####
-------------------- ## ------------------------------------------
##
Remote Python Call (RPyC) v $$VERSION$$, $$DATE$$
Licensed under the MIT license (see `LICENSE` file)
A transparent, symmetric and light-weight RPC and distributed computing
library for python.
Usage::
>>> import rpyc
>>> c = rpyc.connect_by_service("SERVICENAME")
>>> print c.root.some_function(1, 2, 3)
Classic-style usage::
>>> import rpyc
>>> # `hostname` is assumed to be running a slave-service server
>>> c = rpyc.classic.connect("hostname")
>>> print c.execute("x = 5")
None
>>> print c.eval("x + 2")
7
>>> print c.modules.os.listdir(".") #doctest: +ELLIPSIS
[...]
>>> print c.modules["xml.dom.minidom"].parseString("<a/>") #doctest: +ELLIPSIS
<xml.dom.minidom.Document instance at ...>
>>> f = c.builtin.open("foobar.txt", "rb") #doctest: +SKIP
>>> print f.read(100) #doctest: +SKIP
...
"""
from rpyc.core import (SocketStream, TunneledSocketStream, PipeStream, Channel,
Connection, Service, BaseNetref, AsyncResult, GenericException,
AsyncResultTimeout, VoidService, SlaveService, inspect_methods)
from rpyc.utils.factory import (connect_stream, connect_channel, connect_pipes,
connect_stdpipes, connect, ssl_connect, discover, connect_by_service, connect_subproc,
connect_thread, ssh_connect)
from rpyc.utils.helpers import async, timed, buffiter, BgServingThread, restricted, enable_local_cache, enable_async_methods, async_value
from rpyc.utils import classic
from rpyc.version import version, version_string, release_date
__author__ = "Tomer Filiba ([email protected])"
__version__ = version
__doc__ = __doc__.replace("$$VERSION$$", version_string).replace("$$DATE$$", release_date)
del version_string, release_date
|
bsd-3-clause
|
niwtr/map-walker
|
src/server/fake_client_test.py
|
1
|
1246
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import socket
import time
import threading
# Get the port assignment from the dispatcher.
ip_address = '119.29.232.198'  # alternatives: socket.gethostbyname(socket.gethostname()), '10.201.12.244'
ds=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ds.connect((ip_address,9999))
print(ds.recv(1024).decode('utf-8'))
addr=int(ds.recv(1024).decode('utf-8'))
ds.close()
# Establish the working connection on the assigned port.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Addr:' +str(addr))
s.connect((ip_address,addr))
print(s.recv(1024).decode('utf-8'))
def listen(s,a):
time.sleep(0.01)
dd=s.recv(1024).decode('utf-8')
if(dd=='closed'):
s.close()
return
else:
print(str(addr)+" "+time.asctime()+" get response to "+str(a)+": ", end="")
print(dd)
def speak_to_server(s):
a=2
#threading.Thread(target=listen,args=(s))
while(1):
if a>4:
a=2
else: pass
time.sleep(0.01)
dd=[]
STR=str("echo::"+str(["hello, world"])).encode('utf-8')
if (1):##str
s.sendall(STR)
listen(s,a)
else:
print('No inputs. Type again.')
a=a+1
speak_to_server(s)
|
mit
|
matbu/ansible-modules-extras
|
cloud/amazon/ec2_vpc_net_facts.py
|
23
|
3693
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_vpc_net_facts
short_description: Gather facts about ec2 VPCs in AWS
description:
- Gather facts about ec2 VPCs in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPCs
- ec2_vpc_net_facts:
# Gather facts about a particular VPC using VPC ID
- ec2_vpc_net_facts:
filters:
vpc-id: vpc-00112233
# Gather facts about any VPC with a tag key Name and value Example
- ec2_vpc_net_facts:
filters:
"tag:Name": Example
'''
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
def get_vpc_info(vpc):
try:
classic_link = vpc.classic_link_enabled
except AttributeError:
classic_link = False
vpc_info = { 'id': vpc.id,
'instance_tenancy': vpc.instance_tenancy,
'classic_link_enabled': classic_link,
'dhcp_options_id': vpc.dhcp_options_id,
'state': vpc.state,
'is_default': vpc.is_default,
'cidr_block': vpc.cidr_block,
'tags': vpc.tags
}
return vpc_info
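# For illustration (not part of the original module), the returned dict has
# this shape; the values below are hypothetical:
#
#   {'id': 'vpc-00112233', 'instance_tenancy': 'default',
#    'classic_link_enabled': False, 'dhcp_options_id': 'dopt-0a1b2c3d',
#    'state': 'available', 'is_default': False,
#    'cidr_block': '10.0.0.0/16', 'tags': {'Name': 'Example'}}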
def list_ec2_vpcs(connection, module):
filters = module.params.get("filters")
vpc_dict_array = []
try:
all_vpcs = connection.get_all_vpcs(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for vpc in all_vpcs:
vpc_dict_array.append(get_vpc_info(vpc))
module.exit_json(vpcs=vpc_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_vpcs(connection, module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
hsnr-gamera/gamera
|
gamera/gui/compat_wx.py
|
1
|
14251
|
#
# Copyright (C) 2018 Jens Dahlmanns
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# This is a compatibility wrapper to enable parallel support of wxPython 3 and 4
from distutils.version import LooseVersion
import wx
IS_WXP4 = wx.VERSION >= (4,0)
import wx.py
import wx.grid
if IS_WXP4:
import wx.adv
def resize_window_virtual(sizer, window):
"""
Tell the sizer to resize the *virtual size* of the *window* to match the
sizer's minimal size. This will not alter the on screen size of the
window, but may cause the addition/removal/alteration of scrollbars
required to view the virtual area in windows which manage it.
:param sizer: Sizer used for resizing
:param window: Window which is resized
"""
if wx.VERSION < (2, 9):
sizer.SetVirtualSizeHints(window)
else:
sizer.FitInside(window)
def create_help_display(parent, docstring):
"""
Creates a help window that contains the information specified by the *docstring*.
:param parent: Window which should be the parent of the help window
:param docstring: Content of the help window
:return: help window
"""
import wx
if wx.VERSION >= (2, 5):
import wx.html
from gamera import util
from gamera.gui.gui_util import docstring_to_html
try:
docstring = util.dedent(docstring)
html = docstring_to_html(docstring)
window = wx.html.HtmlWindow(parent, -1, size=wx.Size(50, 100))
if "gtk2" in wx.PlatformInfo:
window.SetStandardFonts()
window.SetPage(html)
window.SetBackgroundColour(wx.Colour(255, 255, 232))
if wx.VERSION < (2, 8):
window.SetBestFittingSize(wx.Size(50, 150))
else:
window.SetInitialSize(wx.Size(50, 150))
return window
except Exception as e:
print(e)
else:
from gamera import util
docstring = util.dedent(docstring)
style = (wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2)
window = wx.TextCtrl(parent, -1, style=style, size=wx.Size(50, 100))
window.SetValue(docstring)
window.SetBackgroundColour(wx.Colour(255, 255, 232))
return window
def configure_size_small_height(grid_sizer, window, min_width, client_area):
if wx.VERSION < (2, 8):
grid_sizer.SetBestFittingSize(wx.Size(0, 0))
window.SetBestFittingSize(wx.Size(min_width, client_area.height/2))
else:
grid_sizer.SetInitialSize(wx.Size(0, 0))
window.SetInitialSize(wx.Size(min_width, client_area.height/2))
def configure_size_normal_height(grid_sizer, window, min_width, min_height):
if wx.VERSION < (2, 8):
grid_sizer.SetBestFittingSize(wx.Size(0, 0))
window.SetBestFittingSize(wx.Size(min_width, min_height))
else:
height = window.GetSize().height
grid_sizer.SetInitialSize(wx.Size(0, 0))
window.SetInitialSize(wx.Size(min_width,height))
if wx.VERSION >= (2, 5):
import wx.html
class Calltip(wx.html.HtmlWindow):
def __init__(self, parent=None, id=-1):
wx.html.HtmlWindow.__init__(self, parent, id)
wx.py.crust.dispatcher.connect(receiver=self.display, signal='Shell.calltip')
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
self.SetBackgroundColour(wx.Colour(255, 255, 232))
self.message_displayed = False
self.cache = {}
def display(self, calltip):
"""Receiver for Shell.calltip signal."""
from gamera.gui.gui_util import docstring_to_html
html = docstring_to_html(calltip)
self.SetPage(html)
self.SetBackgroundColour(wx.Colour(255, 255, 232))
def OnLinkClicked(self, link):
from gamera.gui.gui_util import message
if not self.message_displayed:
message("Clicking on links is not supported.")
self.message_displayed = True
else:
Calltip = wx.py.crust.Calltip
def configure_shell_auto_completion(shell):
if wx.VERSION < (2, 5):
shell.autoComplete = False
def set_dialog_style(dialog):
if wx.VERSION < (2, 8):
dialog.SetStyle(dialog._flags)
def configure_icon_display_style():
"""
Creates a display-style for icons based on the OS and wx-Version.
:return: display-style for icons
"""
if wx.Platform == '__WXMAC__':
style = wx.LC_ICON|wx.LC_SINGLE_SEL
else:
style = wx.LC_LIST|wx.LC_SINGLE_SEL
if not (wx.VERSION >= (2, 5) and wx.Platform == '__WXGTK__'):
style |= wx.LC_ALIGN_TOP
return style
def configure_print_dialog_data(dialog_data):
"""
Configures the specified print dialog-data so that an upcoming OnPrint
is called.
:param dialog_data: PrintDialogData
"""
# TODO: This should theoretically work with wxPython 2.5 +,
# but it actually causes OnPrint to never be called.
if wx.VERSION < (2, 5):
dialog_data.EnableHelp(False)
dialog_data.EnablePageNumbers(False)
dialog_data.EnableSelection(False)
def register_get_first_child(tree_ctrl_class):
"""
Defines a GetFirstChild(root, cookie)-method for the given TreeCtrl-class in case the
wx-Version is 2.5 or greater.
:param tree_ctrl_class: TreeCtrl-class
"""
# This is a stub to provide compatibility with wx2.4 and wx2.5
if wx.VERSION >= (2, 5):
def GetFirstChild(self, root, cookie):
return wx.TreeCtrl.GetFirstChild(self, root)
tree_ctrl_class.GetFirstChild = GetFirstChild
def register_set_scrollbars(image_display_class):
"""
Defines a SetScrollbars(x_amount, y_amount, w, h, x, y)-method for the given ImageDisplay-class
in case the wx-Version is 2.5 or greater.
:param image_display_class: ImageDisplay-class
"""
if wx.VERSION >= (2, 5):
def SetScrollbars(self, x_amount, y_amount, w, h, x, y):
self.SetVirtualSize((w * x_amount, h * y_amount))
self.SetScrollRate(x_amount, y_amount)
self.Scroll(x, y)
image_display_class.SetScrollbars = SetScrollbars
def set_grid_line_colour(grid):
"""
Sets a colour of (130,130,254) for the grid lines for the specified grid in case
the wx-Version is 2.5 or greater.
:param grid:
"""
if wx.VERSION >= (2, 5):
grid.SetGridLineColour(wx.Colour(130,130,254))
def register_renderer_access(multi_image_display_class):
"""
Registers a renderer-accessor method (get_renderer) for the MultiImageDisplay-class.
:param multi_image_display_class: MultiImageDisplay-class
"""
if wx.VERSION >= (2,5):
def get_renderer(self):
return self.renderer.Clone()
else:
def get_renderer(self):
return self.GetDefaultRenderer()
multi_image_display_class.get_renderer = get_renderer
def init_image_handlers():
"""
Initialization of all image handlers (for versions below 2.9)
"""
#if int(wx.__version__.split('.')[0]) < 3 and int(wx.__version__.split('.')[1]) < 9:
if LooseVersion(wx.__version__) < LooseVersion('2.9'):
wx.InitAllImageHandlers() # deprecated since wxPython 2.9
def set_control_down(key_event):
"""
Sets the control key down for the specified key-event based on the wx-Version.
"""
if LooseVersion(wx.__version__) < LooseVersion('3.0'):
key_event.m_controlDown = True
else:
key_event.SetControlDown(True)
#
# wxPython 4 (Phoenix)
#
DropTarget = wx.DropTarget if IS_WXP4 else wx.PyDropTarget
Validator = wx.Validator if IS_WXP4 else wx.PyValidator
GridCellRenderer = wx.grid.GridCellRenderer if IS_WXP4 else wx.grid.PyGridCellRenderer
SplashScreen = wx.adv.SplashScreen if IS_WXP4 else wx.SplashScreen
SPLASH_CENTRE_ON_SCREEN = wx.adv.SPLASH_CENTRE_ON_SCREEN if IS_WXP4 else wx.SPLASH_CENTRE_ON_SCREEN
SPLASH_NO_TIMEOUT = wx.adv.SPLASH_NO_TIMEOUT if IS_WXP4 else wx.SPLASH_NO_TIMEOUT
ASSERT_SUPPRESS = wx.APP_ASSERT_SUPPRESS if IS_WXP4 else wx.PYAPP_ASSERT_SUPPRESS
FD_SAVE = wx.FD_SAVE if IS_WXP4 else wx.SAVE
# TODO: or wx.grid.EVT_GRID_CELL_CHANGING?
EVT_GRID_CELL_CHANGED = wx.grid.EVT_GRID_CELL_CHANGED if IS_WXP4 else wx.grid.EVT_GRID_CELL_CHANGE
FILE_DROP_DONE = True if IS_WXP4 else None
def __get_version():
if IS_WXP4:
return wx.VERSION[:2]
else:
from wxPython.wx import wxVERSION
return wxVERSION[:2]
def select_version():
# This function is no longer called
try:
import wxversion
wxversion.select(["3.0", "2.9", "2.8", "2.6", "2.5", "2.4"])
except ImportError as e:
version = __get_version()
# Check that the version is correct
if version < (2, 4) or version > (4, 0):
raise RuntimeError("""This version of Gamera requires wxPython 2.4.x, 2.6.x, 2.8.x, 2.9.x, 3.0.x or 4.0.x.
However, it seems that you have wxPython %s installed.""" % ".".join([str(x) for x in version]))
def create_empty_image(width, height, clear=True):
if IS_WXP4:
return wx.Image(width, height, clear)
else:
return wx.EmptyImage(width, height, clear)
def create_empty_bitmap(width, height, depth=wx.BITMAP_SCREEN_DEPTH):
if IS_WXP4:
return wx.Bitmap(width, height, depth)
else:
return wx.EmptyBitmap(width, height, depth)
def create_icon_from_bitmap(bmp):
if IS_WXP4:
return wx.Icon(bmp)
else:
return wx.IconFromBitmap(bmp)
def create_image_from_stream(stream):
if IS_WXP4:
return wx.Image(stream)
else:
return wx.ImageFromStream(stream)
def create_bitmap_from_image(image):
if IS_WXP4:
return wx.Bitmap(image)
else:
return wx.BitmapFromImage(image)
def create_stock_cursor(id):
if IS_WXP4:
return wx.Cursor(id)
else:
return wx.StockCursor(id)
def set_tool_tip(window, tooltip_string):
"""
Sets the tooltip string for the given window.
"""
if IS_WXP4:
window.SetToolTip(tooltip_string)
else:
window.SetToolTipString(tooltip_string)
def is_validator_silent():
"""
Checks whether the wx.Validator is currently silent.
See also:
- wxp4 : wx.Validator#IsSilent()
- older: wx.Validator_IsSilent()
"""
if IS_WXP4:
return wx.Validator.IsSilent()
else:
return wx.Validator_IsSilent()
def begin_drawing(dc):
if not IS_WXP4:
dc.BeginDrawing()
def end_drawing(dc):
if not IS_WXP4:
dc.EndDrawing()
def set_size(window, x, y, width, height, sizeFlags=wx.SIZE_AUTO):
"""
Sets the size of the wx.Window in pixels.
"""
if IS_WXP4:
window.SetSize(x, y, width, height, sizeFlags)
else:
window.SetDimensions(x, y, width, height, sizeFlags)
def create_data_format(format):
if IS_WXP4:
return wx.DataFormat(format)
else:
return wx.CustomDataFormat(format)
def get_window_size(window):
"""
Returns the size of the entire window in pixels, including title bar, border, scrollbars, etc.
"""
if IS_WXP4:
return window.GetSize()
else:
return window.GetSizeTuple()
def add_img_list_icon(image_list, icon):
"""
Adds a new image using an icon to the wx.ImageList.
"""
if IS_WXP4:
return image_list.Add(icon)
else:
return image_list.AddIcon(icon)
def insert_list_img_string_item(list_ctrl, index, label, icon):
"""
Inserts an image/string item to the wx.ListCtrl.
"""
if IS_WXP4:
list_ctrl.InsertItem(index, label, icon)
else:
list_ctrl.InsertImageStringItem(index, label, icon)
def set_list_string_item(list_ctrl, index, column, label, image_id):
"""
Sets an item string field at a particular column for a wx.ListCtrl.
"""
if IS_WXP4:
list_ctrl.SetItem(index, column, label, image_id)
else:
list_ctrl.SetStringItem(index, column, label, image_id)
def get_list_event_item_index(list_event):
"""
Returns the item index of the wx.ListEvent.
"""
if IS_WXP4:
return list_event.GetIndex()
else:
return list_event.m_itemIndex
def get_tree_item_data(tree_ctrl, item):
"""
Returns the wx.TreeCtrl item data associated with the item.
"""
if IS_WXP4:
return tree_ctrl.GetItemData(item)
else:
return tree_ctrl.GetPyData(item)
def set_tree_item_data(tree_ctrl, item, data):
"""
Sets item client data for the specified wx.TreeCtrl.
"""
if IS_WXP4:
tree_ctrl.SetItemData(item, data)
else:
tree_ctrl.SetPyData(item, data)
def extend_menu(menu, menu_item_id, item, sub_menu):
"""
Extends the specified wx.Menu with a sub-menu.
"""
if IS_WXP4:
menu.Append(menu_item_id, item, sub_menu)
else:
menu.AppendMenu(menu_item_id, item, sub_menu)
def handle_event_0(event_handler, event, callable):
"""
Registers an event handler for the specified event.
"""
if IS_WXP4:
event_handler.Bind(event, callable)
else:
event(event_handler, callable)
def handle_event_1(event_handler, event, callable, id1=wx.ID_ANY):
"""
Registers an event handler for the specified event that requires a single ID.
"""
if IS_WXP4:
event_handler.Bind(event, callable, id=id1)
else:
event(event_handler, id1, callable)
def handle_event_2(event_handler, event, callable, id1=wx.ID_ANY, id2=wx.ID_ANY):
"""
Registers an event handler for the specified event that requires two IDs.
"""
if IS_WXP4:
event_handler.Bind(event, callable, id=id1, id2=id2)
else:
event(event_handler, id1, id2, callable)
def handle_timer_event(event_handler, callable, timer_id):
"""
Registers an event handler for the wx.EVT_TIMER-event.
"""
if IS_WXP4:
event_handler.Bind(wx.EVT_TIMER, callable)
else:
wx.EVT_TIMER(event_handler, timer_id, callable)
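# Illustrative usage sketch, not part of the original module; the frame and
# button below are hypothetical. The same call works on wxPython 3 (where
# wx.EVT_BUTTON is callable) and wxPython 4 (where Bind() is used):
#
#   frame = wx.Frame(None)
#   button = wx.Button(frame, wx.ID_ANY, "Go")
#   def on_click(event):
#       set_tool_tip(button, "clicked")
#   handle_event_1(frame, wx.EVT_BUTTON, on_click, button.GetId())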
|
gpl-2.0
|
ba50/nanobeam_electron_diffraction
|
Dm3Reader3_1.py
|
1
|
10395
|
# '<i' == little endian byte ordering
# TODO: create classes for tag_group, tag, etc.
# sizes of different datatypes in bytes
type_size = { 'char': 1, 'bool': 1, 'i8': 1, 'i16': 2, 'i32': 4, 'float': 4, 'double': 8 }
# format strings for different datatypes to be used inside the struct.unpack() function
type_format = { 'char': 'c', 'uchar': 'B', 'bool': '?', 'i16': 'h', 'ui16': 'H', 'i32': 'i', 'ui32': 'I', 'float': 'f', 'double': 'd' }
#-----------------------------------------------------------------------------------
def Reverse(word):
'''Returns reversed string.
Keyword arguments:
word (string) -- string to be reversed
'''
return word[::-1]
#-----------------------------------------------------------------------------------
def GetTypeSize(type_id):
'''Gives size of the datatype in bytes.
Keyword arguments:
type_id (integer) -- id number of the datatype given by dm3 format
Returns: integer
'''
if type_id == 0:
return 0
elif type_id == 2 or type_id == 4:
return type_size['i16']
elif type_id == 3 or type_id == 5:
return type_size['i32']
elif type_id == 6:
return type_size['float']
elif type_id == 7:
return type_size['double']
elif type_id == 8:
return type_size['bool']
elif type_id == 9:
return type_size['char']
elif type_id == 10:
return type_size['i8']
#------------------------------------------------------------------------------------
def ReadDm3File(dm3_fpath):
'''Reads a dm3 file byte after byte to get the image data and returns it
as a 2D numpy array (saving to PNG is available via SaveDm3AsPng()).
Possible future developments: reading and storing values of all dm3 tags.
Keyword arguments:
dm3_fpath (string) -- path of the dm3 file to be read
Returns: 2D numpy array with the image data
'''
import sys
import struct
import numpy as np
#sys.stdout = open(dm3_fpath.replace('.dm3', '_log.txt'), 'w')
dm3_file = open(dm3_fpath, 'rb')
#print('Reading DM3 File...')
header_size = 3 * type_size['i32']
header = dm3_file.read(header_size)
header = Reverse(header)
# instead of Reverse(), the string could be read in the appropriate (big-endian) format, e.g.
# header = struct.unpack('>i', dm3_file.read(size))
header_format = '%di' % (len(header) // 4)
header_list = list(struct.unpack(header_format, header))
dm3_items = { 'dm_version': 0, 'file_size': 0, 'byte_order': 0, 'tag_group': 0 }
dm3_items['dm_version'] = header_list[2]
dm3_items['file_size'] = header_list[1]
dm3_items['byte_order'] = header_list[0]
#print('DM version: ' + str(dm3_items['dm_version']) + '\n' \
#'File size: ' + str(dm3_items['file_size']) + ' bytes')
image_dims = []
image_data = []
#main_tag_group_size = dm3_items['file_size'] - header_size
ReadTagGroup(dm3_file, image_dims, image_data)
#SaveDm3AsPng(image_data, image_dims, dm3_fpath)
#print('\nAll done')
#sys.stdout = sys.__stdout__
image1d = np.asarray(image_data)
image2d = np.reshape(image1d, tuple(image_dims))
return image2d
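# Illustrative usage sketch, not part of the original module; matplotlib is an
# assumption here, used only to display the returned 2D array:
#
#   import matplotlib.pyplot as plt
#   image = ReadDm3File('img1.dm3')
#   plt.imshow(image, cmap='gray')
#   plt.show()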
#-----------------------------------------------------------------------------------
def ReadTagGroup(dm3_file, image_dims, image_data):
'''Reads group of dm3 tags.
For every single tag in a group it calls ReadTag() function.
Keyword arguments:
dm3_file (file) -- dm3 file object
image_data (list) -- empty container for the image data
(image data will be stored in this list when tag with 'Data' label will be found)
Returns: None
'''
import struct
#print('\n----------------------------------------\n' + \
#'Tag Group' + \
#'\n----------------------------------------')
tgroup_header_size = 2 * type_size['bool'] + type_size['i32']
tgroup_header = dm3_file.read(tgroup_header_size)
tgroup_header = Reverse(tgroup_header)
tgroup_items = { 'is_sorted': False, 'is_open': False, 'n_tags': 0, 'tag_list': [] }
tgroup_items['n_tags'] = struct.unpack('i', tgroup_header[:4])[0]
tgroup_items['is_open'] = struct.unpack('?', tgroup_header[4:5])[0]
tgroup_items['is_sorted'] = struct.unpack('?', tgroup_header[5:6])[0]
for tag_idx in range(0, tgroup_items['n_tags']): # range(tgroup_items['n_tags']) would also do
ReadTag(dm3_file, image_dims, image_data)
#-----------------------------------------------------------------------------------
def ReadTag(dm3_file, image_dims, image_data):
'''Reads single tag.
If a tag turns out to be a tag group it calls ReadTagGroup() function.
If a tag is a single tag then it calls ReadTagType() function.
Keyword arguments:
dm3_file (file) -- dm3 file object
image_data (list) -- container for the image data
Returns: None
'''
import struct
tag_header_size = type_size['char'] + type_size['i16']
tag_header = dm3_file.read(tag_header_size)
tag_header = Reverse(tag_header)
tag_items = { 'is_group': False, 'label_length': 0, 'label': '', 'tag_content': [] }
tag_items['label_length'] = struct.unpack('h', tag_header[:2])[0]
is_data_or_group = (struct.unpack('c', tag_header[2:3])[0]).decode('utf-8')
tag_items['is_group'] = is_data_or_group == chr(20)  # chr(20) marks a tag group, chr(21) a data tag
label_format = '%ds' % tag_items['label_length']
tag_items['label'] = (struct.unpack(label_format, dm3_file.read(tag_items['label_length']))[0]).decode('utf-8', 'ignore')
#print('"%s"' % (tag_items['label']))
#print(str(tag_items['label'])[1:])
has_data = False
if tag_items['label'] == 'Data':
has_data = True
# ---------
has_dims = False
if tag_items['label'] == 'RestoreImageDisplayBounds':
has_dims = True
# ---------
if tag_items['is_group']:
ReadTagGroup(dm3_file, image_dims, image_data)
else:
ReadTagType(dm3_file, has_dims, has_data, image_dims, image_data)
#-----------------------------------------------------------------------------------
def ReadTagType(dm3_file, has_dims, has_data, image_dims, image_data):
'''Reads information about data structure and datatypes of individual values.
Keyword arguments:
dm3_file (file) -- dm3 file object
has_data (boolean) -- specifies if a given tag contains image data (True) or not (False)
image_data (list) -- container for the image data
Returns: None
'''
import struct
ttype_header_size = 2 * type_size['i32']
ttype_header = dm3_file.read(ttype_header_size)
ttype_header = Reverse(ttype_header)
ttype_items = { 'info_array_length': 0, 'info_array': [], 'data': [] }
ttype_items['info_array_length'] = struct.unpack('i', ttype_header[:4])[0]
info_array_size = ttype_items['info_array_length'] * type_size['i32']
info_array_format = '>%di' % (ttype_items['info_array_length'])
ttype_items['info_array'] = struct.unpack(info_array_format, dm3_file.read(info_array_size))
type_id = 0
array_size = 0
data_size = 0
# array
if ttype_items['info_array'][0] == 20: # array type id = 20
array_size = ttype_items['info_array'][ttype_items['info_array_length'] - 1]
# simple array
if ttype_items['info_array_length'] == 3: # length of simple array = 3
type_id = ttype_items['info_array'][1]
data_size = GetTypeSize(type_id) * array_size
# array of groups
elif ttype_items['info_array_length'] == 11: # length of array of groups = 11
type_id = ttype_items['info_array'][5]
for i in range(0, ttype_items['info_array'][3]):
data_size += GetTypeSize(ttype_items['info_array'][5 + 2 * i]) * array_size
# not array
else:
# struct
if ttype_items['info_array_length'] > 1: # length of single entry = 1
type_id = ttype_items['info_array'][4]
for i in range(0, ttype_items['info_array'][2]):
data_size += GetTypeSize(ttype_items['info_array'][4 + 2 * i])
# single entry
else:
type_id = ttype_items['info_array'][0]
data_size = GetTypeSize(ttype_items['info_array'][0])
n_elements = data_size // GetTypeSize(type_id)  # integer count of elements
ReadTagData(dm3_file, n_elements, type_id, has_dims, has_data, image_dims, image_data)
#-----------------------------------------------------------------------------------
def ReadTagData(dm3_file, n_elements, type_id, has_dims, has_data, image_dims, image_data):
'''Reads data based on the information about datatypes and number of elements.
Keyword arguments:
dm3_file (file) -- dm3 file object
n_elements (integer) -- number of elements (individual values) to be read
type_id (integer) -- id number of the datatype given by dm3 format
has_data (boolean) -- specifies if a given tag contains image data (True) or not (False)
image_data (list) -- container for the image data
Returns: None
'''
import struct
n_elements = int(n_elements)
data = []
data_format = str(n_elements)
elem_size = GetTypeSize(type_id)  # local name avoids shadowing the module-level type_size dict
if type_id == 2:
data_format += type_format['i16']
elif type_id == 4:
data_format += type_format['ui16']
elif type_id == 3:
data_format += type_format['i32']
elif type_id == 5:
data_format += type_format['ui32']
elif type_id == 6:
data_format += type_format['float']
elif type_id == 7:
data_format += type_format['double']
elif type_id == 8:
data_format += type_format['bool']
elif type_id == 9:
data_format += type_format['char']
elif type_id == 10:
data_format += type_format['uchar']
data = struct.unpack(data_format, dm3_file.read(n_elements * elem_size))
if has_data:
image_data.clear()
image_data.extend(data)
if has_dims:
image_dims.extend([ int(dim) for dim in data[2:] ])
#------------------------------------------------------------------------------------
def SaveDm3AsPng(image_data, image_dims, dm3_fname):
'''Saves image data as png file.
Image data is a matrix of integer values. Each value corresponds to a single greyscale pixel.
Keyword arguments:
image_data (list) -- data which contains information about pixel values
image_dims (list/tuple) -- image dimensions used to reshape the data
dm3_fname (string) -- path of the dm3 file
Returns: None
'''
import numpy as np
from PIL import Image as im
image1d = np.asarray(image_data)
image2d = np.reshape(image1d, tuple(image_dims))
image2d_rescaled = ((image2d - image2d.min()) * 255.0 / (image2d.max() - image2d.min())).astype(np.uint8)  # normalize over the full value range so the output spans 0-255
image = im.fromarray(image2d_rescaled)
image.save(dm3_fname.replace('.dm3', '.png'))
#------------------------------------------------------------------------------------
# Run this block of code if a module is run as a standalone program
if __name__ == '__main__':
ReadDm3File('img1.dm3')
|
gpl-3.0
|
autosub-team/autosub
|
VELS_WEB/languages/zh-cn.py
|
142
|
10465
|
# coding: utf8
{
'!langcode!': 'zh-cn',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" 应为选择表达式, 格式如 "field1=\'value\'". 但是对 JOIN 的结果不可以使用 update 或者 delete"',
'%s %%{row} deleted': '已删除 %s 笔',
'%s %%{row} updated': '已更新 %s 笔',
'%s selected': '%s 已选择',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式类似 "zh-tw")',
'A new version of web2py is available': '新版 web2py 已推出',
'A new version of web2py is available: %s': '新版 web2py 已推出: %s',
'about': '关于',
'About': '关于',
'About application': '关于本应用程序',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Admin is disabled because unsecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '点击进入管理界面',
'Administrator Password:': '管理员密码:',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'An error occured, please %s the page',
'appadmin is disabled because insecure channel': '管理界面在非安全通道下被禁用',
'Are you sure you want to delete file "%s"?': '确定要删除文件"%s"?',
'Are you sure you want to delete this object?': '确定要删除该对象么?',
'Are you sure you want to uninstall application "%s"': '确定要删除应用程序 "%s"',
'Are you sure you want to uninstall application "%s"?': '确定要删除应用程序 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登录管理账号需要安全连接(HTTPS)或是在本地连接(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因为在测试模式不保证多线程安全性,所以不可同时执行多个测试案例',
'ATTENTION: you cannot edit the running application!': '注意:不可编辑正在执行的应用程序!',
'Authentication': '验证',
'Available Databases and Tables': '可提供的数据库和数据表',
'Buy this book': '购买本书',
'cache': '高速缓存',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '编译失败:应用程序有错误,请排除错误后再尝试编译.',
'Change Password': '修改密码',
'change password': '修改密码',
'Check to delete': '打勾以示删除',
'Check to delete:': '打勾以示删除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客户端网址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版权所有',
'Create new application': '创建应用程序',
'Created By': 'Created By',
'Created On': 'Created On',
'Current request': '当前网络要求(request)',
'Current response': '当前网络响应(response)',
'Current session': '当前网络连接信息(session)',
'customize me!': '请调整我!',
'data uploaded': '数据已上传',
'Database': '数据库',
'Database %s select': '已选择 %s 数据库',
'Date and Time': '日期和时间',
'db': 'db',
'DB Model': '数据库模型',
'Delete': '删除',
'Delete:': '删除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '发布到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '设计',
'design': '设计',
'Design for': '设计用于',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': '下载',
'E-mail': '电子邮件',
'EDIT': '编辑',
'Edit': '编辑',
'Edit application': '编辑应用程序',
'Edit current record': '编辑当前记录',
'edit profile': '编辑配置文件',
'Edit Profile': '编辑配置文件',
'Edit This App': '编辑本应用程序',
'Editing file': '编辑文件',
'Editing file "%s"': '编辑文件"%s"',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Error logs for "%(app)s"': '"%(app)s"的错误记录',
'Errors': 'Errors',
'export as csv file': '以CSV格式导出',
'FAQ': 'FAQ',
'First name': '名',
'Forgot username?': '忘记用户名?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函数会显示 [passed].',
'Group ID': '群组编号',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '导入/导出',
'Index': '索引',
'insert new': '插入新纪录',
'insert new %s': '插入新纪录 %s',
'Installed applications': '已安裝应用程序',
'Internal State': '內部状态',
'Introduction': 'Introduction',
'Invalid action': '非法操作(action)',
'Invalid email': '不符合电子邮件格式',
'Invalid Query': '无效的查询请求',
'invalid request': '网络要求无效',
'Is Active': 'Is Active',
'Key': 'Key',
'Language files (static strings) updated': '语言文件已更新',
'Languages': '各国语言',
'Last name': '姓',
'Last saved on:': '最后保存时间:',
'Layout': '网页布局',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '软件授权',
'Live Chat': 'Live Chat',
'login': '登录',
'Login': '登录',
'Login to the Administrative Interface': '登录到管理员界面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '忘记密码',
'Lost password?': '忘记密码?',
'Main Menu': '主菜单',
'Manage Cache': 'Manage Cache',
'Menu Model': '菜单模型(menu)',
'Models': '数据模型',
'Modified By': '修改者',
'Modified On': '修改时间',
'Modules': '程序模块',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新记录',
'new record inserted': '已插入新记录',
'next 100 rows': '往后 100 笔',
'NO': '否',
'No databases in this application': '该应用程序不含数据库',
'Object or table name': 'Object or table name',
'Online examples': '点击进入在线例子',
'or import from csv file': '或导入CSV文件',
'Origin': '原文',
'Original/Translation': '原文/翻译',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': '概览',
'Password': '密码',
"Password fields don't match": '密码不匹配',
'Peeking at file': '选择文件',
'Plugins': 'Plugins',
'Powered by': '基于下列技术构建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 笔',
'Python': 'Python',
'Query:': '查询:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '记录',
'record does not exist': '记录不存在',
'Record ID': '记录编号',
'Record id': '记录编号',
'Register': '注册',
'register': '注册',
'Registration identifier': 'Registration identifier',
'Registration key': '注册密钥',
'reload': 'reload',
'Remember me (for 30 days)': '记住我(30 天)',
'Reset Password key': '重置密码',
'Resolve Conflict file': '解决冲突文件',
'Role': '角色',
'Rows in Table': '在数据表里的记录',
'Rows selected': '笔记录被选择',
'Saved file hash:': '已保存文件的哈希值:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '状态',
'Static files': '静态文件',
'Statistics': '统计数据',
'Stylesheet': '网页样式表',
'submit': '提交',
'Submit': '提交',
'Support': 'Support',
'Sure you want to delete this object?': '确定要删除此对象?',
'Table': '数据表',
'Table name': '数据表名称',
'Testing application': '测试中的应用程序',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query"应是类似 "db.table1.field1==\'value\'" 的条件表达式. "db.table1.field1==db.table2.field2"的形式则代表执行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': '视图',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有数据库模型(models)',
'There are no modules': '沒有程序模块(modules)',
'There are no static files': '沒有静态文件',
'There are no translators, only default language is supported': '沒有对应的语言文件,仅支持原始语言',
'There are no views': '沒有视图',
'This App': '该应用',
'This is the %(filename)s template': '这是%(filename)s文件的模板(template)',
'Ticket': '问题清单',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '时间戳',
'Twitter': 'Twitter',
'Unable to check for upgrades': '查询新版本失败',
'Unable to download': '无法下载',
'Unable to download app': '无法下载应用程序',
'unable to parse csv file': '无法解析CSV文件',
'Update:': '更新:',
'Upload existing application': '上传已有应用程序',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式可得到更复杂的条件表达式, (...)&(...) 代表必须都满足, (...)|(...) 代表其一, ~(...)则代表否.',
'User %(id)s Logged-in': '用户 %(id)s 已登录',
'User %(id)s Registered': '用户 %(id)s 已注册',
'User ID': '用户编号',
'Verify Password': '验证密码',
'Videos': '视频',
'View': '查看',
'Views': '视图',
'Welcome': '欢迎',
'Welcome %s': '欢迎 %s',
'Welcome to web2py': '欢迎使用 web2py',
'Welcome to web2py!': '欢迎使用 web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': '您已成功运行 web2py',
'You can modify this application and adapt it to your needs': '请根据您的需要修改本程序',
'You visited the url %s': 'You visited the url %s',
}
|
gpl-2.0
|
ezcall-net-tw/EZCall
|
jni/pjsip/sources/doc/pjsip-book/fetch_trac.py
|
61
|
1722
|
import urllib2
import sys
import unicodedata
def fetch_rst(url):
print 'Fetching %s..' % url
req = urllib2.Request(url)
fd = urllib2.urlopen(req, timeout=30)
body = fd.read()
body = body.replace("\r\n", "\n")
body = body.decode('utf8', 'ignore').encode('ascii', 'ignore')
pos = body.find("{{{")
if pos >= 0:
body = body[pos+4:]
pos = body.find("}}}")
if pos >= 0:
body = body[:pos]
pos = body.find("#!rst")
if pos >= 0:
body = body[pos+6:]
pos = url.rfind("/")
if pos >= 0:
filename = url[pos+1:]
else:
filename = url
pos = filename.find('?')
if pos >= 0:
filename = filename[:pos]
filename += ".rst"
f = open(filename, 'w')
f.write(body)
f.close()
def process_index(index):
pages = []
f = open(index + '.rst', 'r')
line = f.readline()
while line:
if line.find('toctree::') >= 0:
break
line = f.readline()
if line.find('toctree::') < 0:
return []
# Skip the directive options (lines starting with ':')
line = f.readline().strip()
while line and line[0] == ':':
line = f.readline().strip()
# Skip empty lines
line = f.readline().strip()
while not line:
line = f.readline().strip()
# Parse names
while line:
pages.append(line)
line = f.readline().strip()
f.close()
return pages
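# For illustration (not part of the original script), process_index() expects
# an index.rst whose toctree looks like:
#
#   .. toctree::
#      :maxdepth: 2
#
#      intro
#      endpoint
#
# and would return ['intro', 'endpoint'] for the sketch above.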
if __name__ == '__main__':
print "** Warning: This will overwrite ALL RST files in current directory. Continue? [n] ",
if sys.stdin.readline().strip() != 'y':
sys.exit(0)
url_format = 'http://trac.pjsip.org/repos/wiki/pjsip-doc/%s?format=txt'
index = url_format % ('index')
fetch_rst(index)
pages = process_index('index')
for page in pages:
#if not 'endpoint' in page:
# continue
url = url_format % (page)
fetch_rst(url)
print 'Done.'
|
lgpl-3.0
|
saguas/jasper_erpnext_report
|
jasper_erpnext_report/core/FrappeTask.py
|
1
|
2201
|
__author__ = 'luissaguas'
#import json
from jnius import PythonJavaClass, java_method
import frappe, re, os
class FrappeTask(PythonJavaClass):
__javainterfaces__ = ['IFrappeTask']
def read_config(self):
config = frappe.get_conf() or {}
curr_site = os.path.join("currentsite.txt")
config.default_site = frappe.read_file(curr_site) or frappe.local.site
return config
def conf(self):
conf = self.read_config()
return conf
def __init__(self, task_id, result):
super(FrappeTask, self).__init__()
self.task_id = "Local-" + task_id
self.result = result
def get_hostname(self, url):
if not url: return None
if (url.find("://") > -1):
url = url.split('/')[2]
return url[0:url.find(":")] if (re.search(":", url)) else url
def get_site_name(self):
if (frappe.get_request_header('x-frappe-site-name')):
return self.get_hostname(frappe.get_request_header('x-frappe-site-name'))
conf = self.conf()
if (frappe.get_request_header('host') in ['localhost', '127.0.0.1'] and conf.default_site):
return conf.default_site
if (frappe.get_request_header('origin')):
return self.get_hostname(frappe.get_request_header('origin'))
return self.get_hostname(frappe.get_request_header('host'))
def setResult(self, result):
self.result = result
def emit_via_redis(self):
from frappe.async import emit_via_redis
import frappe
response = {}
response.update({
"status": "Success",
"task_id": self.task_id,
"result": self.result
})
sitename = self.get_site_name() or frappe.local.site
emit_via_redis("task_status_change", response, sitename + ":task_progress:" + self.task_id)
@java_method('()V')
def setReadyTask(self):
self.emit_via_redis()
"""
redis_server = None
def get_redis_server():
global redis_server
if not redis_server:
from redis import Redis
redis_server = Redis.from_url(conf.get("async_redis_server") or "redis://localhost:12311")
return redis_server
def emit_via_redis(event, message, room):
r = get_redis_server()
try:
r.publish('events', frappe.as_json({'event': event, 'message': message, 'room': room}))
except redis.exceptions.ConnectionError:
# print frappe.get_traceback()
pass
"""
|
mit
|
rversteegen/commandergenius
|
project/jni/python/src/Lib/encodings/cp865.py
|
593
|
34874
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp865',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
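# Illustrative sketch, not part of the original module: once registered by the
# encodings package, the codec behaves like any other named codec, e.g.
#
#   b'\x9b'.decode('cp865')   # -> u'\xf8' (LATIN SMALL LETTER O WITH STROKE)
#   u'\xf8'.encode('cp865')   # -> b'\x9b'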
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xa4' # 0x00af -> CURRENCY SIGN
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
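### Illustrative usage (not part of the generated codec)
# A hedged sketch: register this codec with the standard `codecs` machinery
# and round-trip a string. Assumes Python 2 (this file uses u'' literals)
# and that the codec is looked up under the name 'cp865'.
def _demo_roundtrip():
    entry = getregentry()
    codecs.register(lambda name: entry if name == 'cp865' else None)
    text = u'\xc5l\xf8ben'                  # u'Åløben', all covered by CP865
    raw = text.encode('cp865')              # encodes via encoding_map
    assert raw.decode('cp865') == text      # decodes via decoding_table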
|
lgpl-2.1
|
odoousers2014/LibrERP
|
l10n_ch_payment_slip/invoice.py
|
4
|
9610
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv.orm import Model, fields
from openerp.tools import mod10r
class AccountMoveLine(Model):
_inherit = "account.move.line"
_compile_get_ref = re.compile('[^0-9]')
_columns = {
'transaction_ref': fields.char('Transaction Ref.', size=128),
}
def get_bvr_ref(self, cursor, uid, move_line_id, context=None):
"""Retrieve ESR/BVR reference from move line in order to print it
        Returns an empty string when no BVR reference should be generated.
        No reference is generated when a transaction reference already
        exists for the line (likely generated by a payment service).
"""
res = ''
if isinstance(move_line_id, (tuple, list)):
assert len(move_line_id) == 1, "Only 1 ID expected"
move_line_id = move_line_id[0]
move_line = self.browse(cursor, uid, move_line_id, context=context)
## We check if the type is bvr, if not we return false
if move_line.invoice.partner_bank_id.state != 'bvr':
return ''
##
if move_line.invoice.partner_bank_id.bvr_adherent_num:
res = move_line.invoice.partner_bank_id.bvr_adherent_num
move_number = ''
if move_line.invoice.number:
move_number = self._compile_get_ref.sub('', str(move_line.invoice.number) + str(move_line_id))
reference = mod10r(res + move_number.rjust(26 - len(res), '0'))
if (move_line.transaction_ref and
move_line.transaction_ref != reference):
            # the line already has a transaction reference and it is
            # not a BVR reference
return ''
return reference
class AccountInvoice(Model):
"""Inherit account.invoice in order to add bvr
printing functionnalites. BVR is a Swiss payment vector"""
_inherit = "account.invoice"
_compile_get_ref = re.compile('[^0-9]')
def _get_reference_type(self, cursor, user, context=None):
"""Function use by the function field reference_type in order to initalise available
BVR Reference Types"""
res = super(AccountInvoice, self)._get_reference_type(cursor, user,
context=context)
res.append(('bvr', 'BVR'))
return res
def _compute_full_bvr_name(self, cursor, uid, ids, field_names, arg, context=None):
res = {}
move_line_obj = self.pool.get('account.move.line')
account_obj = self.pool.get('account.account')
tier_account_id = account_obj.search(cursor, uid, [('type', 'in', ['receivable', 'payable'])])
for inv in self.browse(cursor, uid, ids, context=context):
move_lines = move_line_obj.search(cursor, uid, [('move_id', '=', inv.move_id.id),
('account_id', 'in', tier_account_id)])
if move_lines:
if len(move_lines) == 1:
res[inv.id] = self._space(inv.get_bvr_ref())
else:
refs = []
for move_line in move_line_obj.browse(cursor, uid, move_lines, context=context):
refs.append(self._space(move_line.get_bvr_ref()))
res[inv.id] = ' ; '.join(refs)
return res
_columns = {
### BVR reference type BVR or FREE
'reference_type': fields.selection(_get_reference_type,
'Reference Type', required=True),
### Partner bank link between bank and partner id
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
help='The partner bank account to pay\nKeep empty to use the default'),
'bvr_reference': fields.function(_compute_full_bvr_name, type="char", size=512, string="BVR REF.",
store=True, readonly=True)
}
def _get_bvr_ref(self, cr, uid, invoice, context=None):
"""Retrieve ESR/BVR reference form invoice in order to print it
Receive a browse record so it can be overloaded without rebrowsing
the invoice.
"""
res = ''
## We check if the type is bvr, if not we return false
if invoice.partner_bank_id.state != 'bvr':
return ''
##
if invoice.partner_bank_id.bvr_adherent_num:
res = invoice.partner_bank_id.bvr_adherent_num
invoice_number = ''
if invoice.number:
invoice_number = self._compile_get_ref.sub('', invoice.number)
return mod10r(res + invoice_number.rjust(26 - len(res), '0'))
def get_bvr_ref(self, cursor, uid, inv_id, context=None):
"""Retrieve ESR/BVR reference form invoice in order to print it
Returns False when no BVR reference should be generated. No
reference is generated when the invoice is not a BVR invoice.
"""
if isinstance(inv_id, (list, tuple)):
assert len(inv_id) == 1, "1 ID expected, got %s" % inv_id
inv_id = inv_id[0]
inv = self.browse(cursor, uid, inv_id, context=context)
return self._get_bvr_ref(cursor, uid, inv, context=context)
def _space(self, nbr, nbrspc=5):
"""Spaces * 5.
Example:
self._space('123456789012345')
'12 34567 89012 345'
"""
return ''.join([' '[(i - 2) % nbrspc:] + c for i, c in enumerate(nbr)])
def _update_ref_on_account_analytic_line(self, cr, uid, ref, move_id, context=None):
cr.execute('UPDATE account_analytic_line SET ref=%s'
' FROM account_move_line '
' WHERE account_move_line.move_id = %s '
' AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def _action_bvr_number_move_line(self, cr, uid, invoice, move_line,
ref, context=None):
if not ref:
return
cr.execute('UPDATE account_move_line SET transaction_ref=%s'
' WHERE id=%s', (ref, move_line.id))
self._update_ref_on_account_analytic_line(cr, uid, ref,
move_line.move_id.id)
def action_number(self, cr, uid, ids, context=None):
""" Copy the BVR/ESR reference in the transaction_ref of move lines.
For customers invoices: the BVR reference is computed using
``get_bvr_ref()`` on the invoice or move lines.
For suppliers invoices: the BVR reference is stored in the reference
field of the invoice.
"""
res = super(AccountInvoice, self).action_number(cr, uid, ids, context=context)
move_line_obj = self.pool.get('account.move.line')
for inv in self.browse(cr, uid, ids, context=context):
move_line_ids = move_line_obj.search(
cr, uid,
[('move_id', '=', inv.move_id.id),
('account_id', '=', inv.account_id.id)],
context=context)
if not move_line_ids:
continue
move_lines = move_line_obj.browse(cr, uid, move_line_ids,
context=context)
for move_line in move_lines:
if inv.type in ('out_invoice', 'out_refund'):
if len(move_lines) == 1:
# We keep this branch for compatibility with single
# BVR report.
# This should be cleaned when porting to V8
ref = inv.get_bvr_ref()
else:
ref = move_line.get_bvr_ref()
elif inv.reference_type == 'bvr' and inv.reference:
ref = inv.reference
else:
ref = False
self._action_bvr_number_move_line(cr, uid, inv,
move_line, ref,
context=context)
return res
def copy(self, cursor, uid, inv_id, default=None, context=None):
default = default or {}
default.update({'reference': False})
return super(AccountInvoice, self).copy(cursor, uid, inv_id, default, context)
class AccountTaxCode(Model):
"""Inherit account tax code in order
to add a Case code"""
_name = 'account.tax.code'
_inherit = "account.tax.code"
_columns = {
'code': fields.char('Case Code', size=512),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
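# Illustrative only: a standalone sketch of the recursive modulo-10 checksum
# that openerp.tools.mod10r implements for ESR/BVR references. The table is
# the standard ESR one; this demo is not the library function itself.
def _demo_mod10r(number):
    table = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    carry = 0
    for digit in number:
        carry = table[(carry + int(digit)) % 10]
    return number + str((10 - carry) % 10)
# e.g. _demo_mod10r('123456'.rjust(26, '0')) returns the 26-digit body plus
# its check digit (27 chars), matching the shape built in _get_bvr_ref.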
|
agpl-3.0
|
parenthetical-e/simfMRI
|
runclass.py
|
1
|
3264
|
""" A template Class for top-level experimental runs. """
import os
from numpy.random import RandomState
from multiprocessing import Pool
from simfMRI.io import write_hdf, get_model_names
from simfMRI.analysis.plot import hist_t
from simfMRI.mapreduce import create_chunks, reduce_chunks
from simfMRI.misc import process_prng
class Run():
""" A template for an experimental run. """
def __init__(self):
# ----
# An instance of simfMRI.examples.* Class (or similar)
# should go here.
self.BaseClass = None ## = BaseClass()
# ----
# User Globals
self.nrun = None
self.TR = None
self.ISI = None
self.model_conf = None
self.savedir = None
self.ntrial = None
# --
# Optional Globals
self.ncore = None
# ----
# Misc
self.prngs = None ## A list of RandomState() instances
## setup by the go() attr
def __call__(self, (names, prng)):
return self._singleloop((names, prng))
def _single(self, name, prng):
""" Using the BaseClass attribute run a simulation exp named
<name> using the given prng. Returns a dictionary of results. """
print("Experiment {0}.".format(name))
exp = self.BaseClass(self.ntrial, TR=self.TR, ISI=self.ISI, prng=prng)
exp.populate_models(self.model_conf)
return exp.run(name)
def _singleloop(self, (names, prng)):
""" Loop over <names> and run an Exp for each. Each Exp() uses
prng, a RandomState(). Returns a list of results dictionaries. """
return [self._single(name, prng) for name in names]
def go(self, parallel=False):
""" Run an experimental run, results are stored the
results attribute. """
if parallel:
# ----
# Setup chunks and seeds
self.run_chunks = create_chunks(self.nrun, self.ncore)
self.prngs = [process_prng(ii+10) for ii in range(
len(self.run_chunks))]
# ----
# Create a pool, and use it,
# and store the results
pool = Pool(self.ncore)
results_in_chunks = pool.map(self, zip(self.run_chunks, self.prngs))
## Calling self here works via __call__
self.results = reduce_chunks(results_in_chunks)
else:
# Run an experimental Run, and save to
# self.results
self.prngs = [process_prng(42), ]
self.results = self._singleloop((range(self.nrun), self.prngs[0]))
## Calling self here works via __call__
def save_results(self, name):
""" Save results as <name> in the dir specified in the
savedir attribute. """
# ----
# Create savedir if needed
try:
os.mkdir(self.savedir)
except OSError:
pass
print("Writing results to disk.")
savepath = os.path.join(self.savedir, name+".hdf5")
write_hdf(self.results, savepath)
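# Illustrative only: the Pool-plus-__call__ trick used in go(parallel=True)
# in a minimal, standalone form. Pool.map needs a picklable top-level
# callable; an instance of a top-level class defining __call__ qualifies.
# The names below are made up for the sketch.
class _DemoWorker(object):
    def __call__(self, chunk):
        return sum(chunk)

def _demo_parallel():
    pool = Pool(2)
    try:
        return pool.map(_DemoWorker(), [[1, 2, 3], [4, 5, 6]])  # [6, 15]
    finally:
        pool.close()
        pool.join()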
|
bsd-2-clause
|
eonpatapon/nova
|
nova/api/openstack/compute/plugins/v3/assisted_volume_snapshots.py
|
33
|
3882
|
# Copyright 2013 Red Hat, Inc.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Assisted volume snapshots extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from webob import exc
from nova.api.openstack.compute.schemas.v3 import assisted_volume_snapshots
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
ALIAS = 'os-assisted-volume-snapshots'
authorize = extensions.os_compute_authorizer(ALIAS)
class AssistedVolumeSnapshotsController(wsgi.Controller):
"""The Assisted volume snapshots API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
super(AssistedVolumeSnapshotsController, self).__init__()
@extensions.expected_errors(400)
@validation.schema(assisted_volume_snapshots.snapshots_create)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context, action='create')
snapshot = body['snapshot']
create_info = snapshot['create_info']
volume_id = snapshot['volume_id']
LOG.info(_LI("Create assisted snapshot from volume %s"), volume_id,
context=context)
try:
return self.compute_api.volume_snapshot_create(context, volume_id,
create_info)
except (exception.VolumeBDMNotFound,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
@wsgi.response(204)
@extensions.expected_errors((400, 404))
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context, action='delete')
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
delete_metadata = {}
delete_metadata.update(req.GET)
try:
delete_info = jsonutils.loads(delete_metadata['delete_info'])
volume_id = delete_info['volume_id']
except (KeyError, ValueError) as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
try:
self.compute_api.volume_snapshot_delete(context, volume_id,
id, delete_info)
except (exception.VolumeBDMNotFound,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except exception.NotFound as e:
return exc.HTTPNotFound(explanation=e.format_message())
class AssistedVolumeSnapshots(extensions.V3APIExtensionBase):
"""Assisted volume snapshots."""
name = "AssistedVolumeSnapshots"
alias = ALIAS
version = 1
def get_resources(self):
res = [extensions.ResourceExtension(ALIAS,
AssistedVolumeSnapshotsController())]
return res
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
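# Illustrative only: how a client might call the delete endpoint above. Only
# the 'delete_info' query-parameter format comes from the controller code;
# the endpoint URL, IDs and token are placeholders.
def _demo_delete_snapshot(endpoint, snapshot_id, volume_id, token):
    import json
    import requests  # assumption: any HTTP client would do
    return requests.delete(
        endpoint + '/os-assisted-volume-snapshots/' + snapshot_id,
        params={'delete_info': json.dumps({'volume_id': volume_id})},
        headers={'X-Auth-Token': token})  # expect HTTP 204 on success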
|
apache-2.0
|
mou4e/zirconium
|
tools/telemetry/telemetry/image_processing/io/frame_generator.py
|
52
|
1479
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import abc
class FrameReadError(Exception):
pass
class FrameGenerator(object):
""" Defines an interface for reading input frames.
Attributes:
_generator: A reference to the created generator.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
""" Initializes the FrameGenerator object. """
self._generator = self._CreateGenerator()
@abc.abstractmethod
def _CreateGenerator(self):
""" Creates a new generator.
Implemented in derived classes.
Raises:
      FrameReadError: An error occurred while reading the frame.
"""
raise NotImplementedError
@property
def Generator(self):
""" Returns:
A reference to the created generator.
"""
return self._generator
@abc.abstractproperty
def CurrentTimestamp(self):
""" Returns:
float, The timestamp of the current frame in milliseconds.
"""
raise NotImplementedError
@abc.abstractproperty
def CurrentFrameNumber(self):
""" Returns:
int, The frame index of the current frame.
"""
raise NotImplementedError
@abc.abstractproperty
def Dimensions(self):
""" Returns:
The dimensions of the frame sequence as a tuple int (width, height).
This value should be constant across frames.
"""
raise NotImplementedError
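# Illustrative only: a minimal concrete subclass showing what implementers
# of the interface above provide. It serves pre-loaded frames from a list;
# the class name and the fps default are made up for the sketch.
class ListFrameGenerator(FrameGenerator):
  """ Serves in-memory frames; purely a demonstration. """
  def __init__(self, frames, fps=30.0):
    self._frames = frames
    self._fps = fps
    self._index = -1
    super(ListFrameGenerator, self).__init__()

  def _CreateGenerator(self):
    for i, frame in enumerate(self._frames):
      self._index = i
      yield frame

  @property
  def CurrentTimestamp(self):
    return self._index * 1000.0 / self._fps

  @property
  def CurrentFrameNumber(self):
    return self._index

  @property
  def Dimensions(self):
    # Assumes frames are row-major 2D sequences: (width, height).
    return (len(self._frames[0][0]), len(self._frames[0]))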
|
bsd-3-clause
|
chapmanb/cwltool
|
cwltool/process.py
|
1
|
32827
|
import abc
import copy
import errno
import hashlib
import json
import logging
import os
import shutil
import stat
import tempfile
import urlparse
import uuid
from collections import Iterable
import functools
import avro.schema
import schema_salad.schema
import schema_salad.validate as validate
from pkg_resources import resource_stream
from rdflib import Graph
from rdflib import URIRef
from rdflib.namespace import RDFS, OWL
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from schema_salad.ref_resolver import Loader, file_uri
from schema_salad.sourceline import SourceLine
from typing import (Any, AnyStr, Callable, cast, Dict, List, Generator, Set,
                    Text, Tuple, Union)
from .builder import Builder
from .pathmapper import adjustDirObjs, get_listing
from .errors import WorkflowException, UnsupportedRequirement
from .pathmapper import PathMapper, normalizeFilesDirs, visit_class
from .stdfsaccess import StdFsAccess
from .utils import aslist, get_feature
class LogAsDebugFilter(logging.Filter):
def __init__(self, name, parent): # type: (str, logging.Logger) -> None
super(LogAsDebugFilter, self).__init__(name)
self.parent = parent
def filter(self, record):
return self.parent.isEnabledFor(logging.DEBUG)
_logger = logging.getLogger("cwltool")
_logger_validation_warnings = logging.getLogger("cwltool.validation_warnings")
_logger_validation_warnings.setLevel(_logger.getEffectiveLevel())
_logger_validation_warnings.addFilter(LogAsDebugFilter("cwltool.validation_warnings", _logger))
supportedProcessRequirements = ["DockerRequirement",
"SchemaDefRequirement",
"EnvVarRequirement",
"ScatterFeatureRequirement",
"SubworkflowFeatureRequirement",
"MultipleInputFeatureRequirement",
"InlineJavascriptRequirement",
"ShellCommandRequirement",
"StepInputExpressionRequirement",
"ResourceRequirement",
"InitialWorkDirRequirement",
"http://commonwl.org/cwltool#LoadListingRequirement",
"http://commonwl.org/cwltool#InplaceUpdateRequirement"]
cwl_files = (
"Workflow.yml",
"CommandLineTool.yml",
"CommonWorkflowLanguage.yml",
"Process.yml",
"concepts.md",
"contrib.md",
"intro.md",
"invocation.md")
salad_files = ('metaschema.yml',
'metaschema_base.yml',
'salad.md',
'field_name.yml',
'import_include.md',
'link_res.yml',
'ident_res.yml',
'vocab_res.yml',
'vocab_res.yml',
'field_name_schema.yml',
'field_name_src.yml',
'field_name_proc.yml',
'ident_res_schema.yml',
'ident_res_src.yml',
'ident_res_proc.yml',
'link_res_schema.yml',
'link_res_src.yml',
'link_res_proc.yml',
'vocab_res_schema.yml',
'vocab_res_src.yml',
'vocab_res_proc.yml')
SCHEMA_CACHE = {} # type: Dict[Text, Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text, Any], Loader]]
SCHEMA_FILE = None # type: Dict[Text, Any]
SCHEMA_DIR = None # type: Dict[Text, Any]
SCHEMA_ANY = None # type: Dict[Text, Any]
custom_schemas = {} # type: Dict[Text, Tuple[Text, Text]]
def use_standard_schema(version):
# type: (Text) -> None
if version in custom_schemas:
del custom_schemas[version]
if version in SCHEMA_CACHE:
del SCHEMA_CACHE[version]
def use_custom_schema(version, name, text):
# type: (Text, Text, Text) -> None
custom_schemas[version] = (name, text)
if version in SCHEMA_CACHE:
del SCHEMA_CACHE[version]
def get_schema(version):
# type: (Text) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text,Any], Loader]
if version in SCHEMA_CACHE:
return SCHEMA_CACHE[version]
cache = {} # type: Dict[Text, Text]
version = version.split("#")[-1]
if '.dev' in version:
version = ".".join(version.split(".")[:-1])
for f in cwl_files:
try:
res = resource_stream(__name__, 'schemas/%s/%s' % (version, f))
cache["https://w3id.org/cwl/" + f] = res.read()
res.close()
except IOError:
pass
for f in salad_files:
try:
res = resource_stream(
__name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
% (version, f))
cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
+ f] = res.read()
res.close()
except IOError:
pass
if version in custom_schemas:
cache[custom_schemas[version][0]] = custom_schemas[version][1]
SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
custom_schemas[version][0], cache=cache)
else:
SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
"https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
return SCHEMA_CACHE[version]
def shortname(inputid):
# type: (Text) -> Text
d = urlparse.urlparse(inputid)
if d.fragment:
return d.fragment.split(u"/")[-1]
else:
return d.path.split(u"/")[-1]
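# Illustrative: shortname keeps the last segment of the URI fragment (or of
# the path when there is no fragment). The URIs below are made up.
def _demo_shortname():
    assert shortname(u"file:///tool.cwl#step/inp") == u"inp"
    assert shortname(u"https://w3id.org/cwl/cwl#File") == u"File"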
def checkRequirements(rec, supportedProcessRequirements):
# type: (Any, Iterable[Any]) -> None
if isinstance(rec, dict):
if "requirements" in rec:
for i, r in enumerate(rec["requirements"]):
with SourceLine(rec["requirements"], i, UnsupportedRequirement):
if r["class"] not in supportedProcessRequirements:
raise UnsupportedRequirement(u"Unsupported requirement %s" % r["class"])
for d in rec:
checkRequirements(rec[d], supportedProcessRequirements)
if isinstance(rec, list):
for d in rec:
checkRequirements(d, supportedProcessRequirements)
def adjustFilesWithSecondary(rec, op, primary=None):
"""Apply a mapping function to each File path in the object `rec`, propagating
the primary file associated with a group of secondary files.
"""
if isinstance(rec, dict):
if rec.get("class") == "File":
rec["path"] = op(rec["path"], primary=primary)
adjustFilesWithSecondary(rec.get("secondaryFiles", []), op,
primary if primary else rec["path"])
else:
for d in rec:
adjustFilesWithSecondary(rec[d], op)
if isinstance(rec, list):
for d in rec:
adjustFilesWithSecondary(d, op, primary)
def stageFiles(pm, stageFunc, ignoreWritable=False):
# type: (PathMapper, Callable[..., Any], bool) -> None
for f, p in pm.items():
if not p.staged:
continue
if not os.path.exists(os.path.dirname(p.target)):
os.makedirs(os.path.dirname(p.target), 0o0755)
if p.type in ("File", "Directory") and (p.resolved.startswith("/") or p.resolved.startswith("file:///")):
stageFunc(p.resolved, p.target)
elif p.type == "Directory" and not os.path.exists(p.target) and p.resolved.startswith("_:"):
os.makedirs(p.target, 0o0755)
elif p.type == "WritableFile" and not ignoreWritable:
shutil.copy(p.resolved, p.target)
elif p.type == "WritableDirectory" and not ignoreWritable:
if p.resolved.startswith("_:"):
os.makedirs(p.target, 0o0755)
else:
shutil.copytree(p.resolved, p.target)
elif p.type == "CreateFile" and not ignoreWritable:
with open(p.target, "w") as n:
n.write(p.resolved.encode("utf-8"))
def collectFilesAndDirs(obj, out):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], List[Dict[Text, Any]]) -> None
if isinstance(obj, dict):
if obj.get("class") in ("File", "Directory"):
out.append(obj)
else:
for v in obj.values():
collectFilesAndDirs(v, out)
if isinstance(obj, list):
for l in obj:
collectFilesAndDirs(l, out)
def relocateOutputs(outputObj, outdir, output_dirs, action, fs_access):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Set[Text], Text, StdFsAccess) -> Union[Dict[Text, Any], List[Dict[Text, Any]]]
adjustDirObjs(outputObj, functools.partial(get_listing, fs_access, recursive=True))
if action not in ("move", "copy"):
return outputObj
def moveIt(src, dst):
if action == "move":
for a in output_dirs:
if src.startswith(a+"/"):
_logger.debug("Moving %s to %s", src, dst)
if os.path.isdir(src) and os.path.isdir(dst):
# merge directories
for root, dirs, files in os.walk(src):
for f in dirs+files:
moveIt(os.path.join(root, f), os.path.join(dst, f))
else:
shutil.move(src, dst)
return
if src != dst:
_logger.debug("Copying %s to %s", src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
outfiles = [] # type: List[Dict[Text, Any]]
collectFilesAndDirs(outputObj, outfiles)
pm = PathMapper(outfiles, "", outdir, separateDirs=False)
stageFiles(pm, moveIt)
def _check_adjust(f):
f["location"] = file_uri(pm.mapper(f["location"])[1])
if "contents" in f:
del f["contents"]
return f
visit_class(outputObj, ("File", "Directory"), _check_adjust)
visit_class(outputObj, ("File",), functools.partial(compute_checksums, fs_access))
# If there are symlinks to intermediate output directories, we want to move
# the real files into the final output location. If a file is linked more than once,
# make an internal relative symlink.
if action == "move":
relinked = {} # type: Dict[Text, Text]
for root, dirs, files in os.walk(outdir):
for f in dirs+files:
path = os.path.join(root, f)
rp = os.path.realpath(path)
if path != rp:
if rp in relinked:
os.unlink(path)
os.symlink(os.path.relpath(relinked[rp], path), path)
else:
for od in output_dirs:
if rp.startswith(od+"/"):
os.unlink(path)
os.rename(rp, path)
relinked[rp] = path
break
return outputObj
def cleanIntermediate(output_dirs): # type: (Set[Text]) -> None
for a in output_dirs:
if os.path.exists(a) and empty_subtree(a):
_logger.debug(u"Removing intermediate output directory %s", a)
shutil.rmtree(a, True)
def formatSubclassOf(fmt, cls, ontology, visited):
# type: (Text, Text, Graph, Set[Text]) -> bool
"""Determine if `fmt` is a subclass of `cls`."""
if URIRef(fmt) == URIRef(cls):
return True
if ontology is None:
return False
if fmt in visited:
return False
visited.add(fmt)
uriRefFmt = URIRef(fmt)
for s, p, o in ontology.triples((uriRefFmt, RDFS.subClassOf, None)):
# Find parent classes of `fmt` and search upward
if formatSubclassOf(o, cls, ontology, visited):
return True
for s, p, o in ontology.triples((uriRefFmt, OWL.equivalentClass, None)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(o, cls, ontology, visited):
return True
for s, p, o in ontology.triples((None, OWL.equivalentClass, uriRefFmt)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(s, cls, ontology, visited):
return True
return False
def checkFormat(actualFile, inputFormats, ontology):
# type: (Union[Dict[Text, Any], List, Text], Union[List[Text], Text], Graph) -> None
for af in aslist(actualFile):
if "format" not in af:
raise validate.ValidationException(u"Missing required 'format' for File %s" % af)
for inpf in aslist(inputFormats):
if af["format"] == inpf or formatSubclassOf(af["format"], inpf, ontology, set()):
return
        raise validate.ValidationException(
            u"Incompatible file format %s, required format(s) %s" % (af["format"], inputFormats))
def fillInDefaults(inputs, job):
# type: (List[Dict[Text, Text]], Dict[Text, Union[Dict[Text, Any], List, Text]]) -> None
for e, inp in enumerate(inputs):
with SourceLine(inputs, e, WorkflowException):
if shortname(inp[u"id"]) in job:
pass
elif shortname(inp[u"id"]) not in job and u"default" in inp:
job[shortname(inp[u"id"])] = copy.copy(inp[u"default"])
elif shortname(inp[u"id"]) not in job and aslist(inp[u"type"])[0] == u"null":
pass
else:
raise WorkflowException("Missing required input parameter `%s`" % shortname(inp["id"]))
def avroize_type(field_type, name_prefix=""):
# type: (Union[List[Dict[Text, Any]], Dict[Text, Any]], Text) -> Any
"""
    Add missing information to a type so that CWL types are valid in schema_salad.
"""
if isinstance(field_type, list):
for f in field_type:
avroize_type(f, name_prefix)
elif isinstance(field_type, dict):
if field_type["type"] in ("enum", "record"):
if "name" not in field_type:
field_type["name"] = name_prefix + Text(uuid.uuid4())
if field_type["type"] == "record":
avroize_type(field_type["fields"], name_prefix)
if field_type["type"] == "array":
avroize_type(field_type["items"], name_prefix)
return field_type
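# Illustrative: avroize_type names anonymous enum/record types in place so
# that schema_salad/avro accept them. The type literal below is made up.
def _demo_avroize_type():
    t = {"type": "enum", "symbols": ["a", "b"]}
    avroize_type(t, name_prefix="demo_")
    assert t["name"].startswith("demo_")  # uuid-based generated name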
class Process(object):
__metaclass__ = abc.ABCMeta
def __init__(self, toolpath_object, **kwargs):
# type: (Dict[Text, Any], **Any) -> None
"""
kwargs:
metadata: tool document metadata
requirements: inherited requirements
hints: inherited hints
loader: schema_salad.ref_resolver.Loader used to load tool document
avsc_names: CWL Avro schema object used to validate document
strict: flag to determine strict validation (fail on unrecognized fields)
"""
self.metadata = kwargs.get("metadata", {}) # type: Dict[Text,Any]
self.names = None # type: avro.schema.Names
global SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY # pylint: disable=global-statement
if SCHEMA_FILE is None:
get_schema("v1.0")
SCHEMA_ANY = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/salad#Any"])
SCHEMA_FILE = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#File"])
SCHEMA_DIR = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#Directory"])
names = schema_salad.schema.make_avro_schema([SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY],
schema_salad.ref_resolver.Loader({}))[0]
if isinstance(names, avro.schema.SchemaParseException):
raise names
else:
self.names = names
self.tool = toolpath_object
self.requirements = kwargs.get("requirements", []) + self.tool.get("requirements", [])
self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
self.formatgraph = None # type: Graph
if "loader" in kwargs:
self.formatgraph = kwargs["loader"].graph
self.doc_loader = kwargs["loader"]
self.doc_schema = kwargs["avsc_names"]
checkRequirements(self.tool, supportedProcessRequirements)
self.validate_hints(kwargs["avsc_names"], self.tool.get("hints", []),
strict=kwargs.get("strict"))
self.schemaDefs = {} # type: Dict[Text,Dict[Text, Any]]
sd, _ = self.get_requirement("SchemaDefRequirement")
if sd:
sdtypes = sd["types"]
av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in avroize_type(sdtypes)}, set())
for i in av:
self.schemaDefs[i["name"]] = i
avro.schema.make_avsc_object(av, self.names)
# Build record schema from inputs
self.inputs_record_schema = {
"name": "input_record_schema", "type": "record",
"fields": []} # type: Dict[Text, Any]
self.outputs_record_schema = {
"name": "outputs_record_schema", "type": "record",
"fields": []} # type: Dict[Text, Any]
for key in ("inputs", "outputs"):
for i in self.tool[key]:
c = copy.copy(i)
c["name"] = shortname(c["id"])
del c["id"]
if "type" not in c:
raise validate.ValidationException(u"Missing `type` in parameter `%s`" % c["name"])
if "default" in c and "null" not in aslist(c["type"]):
c["type"] = ["null"] + aslist(c["type"])
else:
c["type"] = c["type"]
c["type"] = avroize_type(c["type"], c["name"])
if key == "inputs":
self.inputs_record_schema["fields"].append(c)
elif key == "outputs":
self.outputs_record_schema["fields"].append(c)
try:
self.inputs_record_schema = cast(Dict[unicode, Any], schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set()))
avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
except avro.schema.SchemaParseException as e:
raise validate.ValidationException(u"Got error `%s` while processing inputs of %s:\n%s" %
(Text(e), self.tool["id"],
json.dumps(self.inputs_record_schema, indent=4)))
try:
self.outputs_record_schema = cast(Dict[unicode, Any], schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set()))
avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
except avro.schema.SchemaParseException as e:
raise validate.ValidationException(u"Got error `%s` while processing outputs of %s:\n%s" %
(Text(e), self.tool["id"],
json.dumps(self.outputs_record_schema, indent=4)))
def _init_job(self, joborder, **kwargs):
# type: (Dict[Text, Text], **Any) -> Builder
"""
kwargs:
eval_timeout: javascript evaluation timeout
use_container: do/don't use Docker when DockerRequirement hint provided
make_fs_access: make an FsAccess() object with given basedir
basedir: basedir for FsAccess
docker_outdir: output directory inside docker for this job
docker_tmpdir: tmpdir inside docker for this job
docker_stagedir: stagedir inside docker for this job
outdir: outdir on host for this job
tmpdir: tmpdir on host for this job
stagedir: stagedir on host for this job
select_resources: callback to select compute resources
"""
builder = Builder()
builder.job = cast(Dict[Text, Union[Dict[Text, Any], List,
Text]], copy.deepcopy(joborder))
# Validate job order
try:
fillInDefaults(self.tool[u"inputs"], builder.job)
normalizeFilesDirs(builder.job)
validate.validate_ex(self.names.get_name("input_record_schema", ""), builder.job,
strict=False, logger=_logger_validation_warnings)
except (validate.ValidationException, WorkflowException) as e:
raise WorkflowException("Invalid job input record:\n" + Text(e))
builder.files = []
builder.bindings = CommentedSeq()
builder.schemaDefs = self.schemaDefs
builder.names = self.names
builder.requirements = self.requirements
builder.hints = self.hints
builder.resources = {}
builder.timeout = kwargs.get("eval_timeout")
builder.debug = kwargs.get("debug")
builder.mutation_manager = kwargs.get("mutation_manager")
dockerReq, is_req = self.get_requirement("DockerRequirement")
if dockerReq and is_req and not kwargs.get("use_container"):
raise WorkflowException(
"Document has DockerRequirement under 'requirements' but use_container is false. DockerRequirement must be under 'hints' or use_container must be true.")
builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
builder.fs_access = builder.make_fs_access(kwargs["basedir"])
loadListingReq, _ = self.get_requirement("http://commonwl.org/cwltool#LoadListingRequirement")
if loadListingReq:
builder.loadListing = loadListingReq.get("loadListing")
if dockerReq and kwargs.get("use_container"):
builder.outdir = builder.fs_access.realpath(
dockerReq.get("dockerOutputDirectory") or kwargs.get("docker_outdir") or "/var/spool/cwl")
builder.tmpdir = builder.fs_access.realpath(kwargs.get("docker_tmpdir") or "/tmp")
builder.stagedir = builder.fs_access.realpath(kwargs.get("docker_stagedir") or "/var/lib/cwl")
else:
builder.outdir = builder.fs_access.realpath(kwargs.get("outdir") or tempfile.mkdtemp())
builder.tmpdir = builder.fs_access.realpath(kwargs.get("tmpdir") or tempfile.mkdtemp())
builder.stagedir = builder.fs_access.realpath(kwargs.get("stagedir") or tempfile.mkdtemp())
if self.formatgraph:
for i in self.tool["inputs"]:
d = shortname(i["id"])
if d in builder.job and i.get("format"):
checkFormat(builder.job[d], builder.do_eval(i["format"]), self.formatgraph)
builder.bindings.extend(builder.bind_input(self.inputs_record_schema, builder.job))
if self.tool.get("baseCommand"):
for n, b in enumerate(aslist(self.tool["baseCommand"])):
builder.bindings.append({
"position": [-1000000, n],
"datum": b
})
if self.tool.get("arguments"):
for i, a in enumerate(self.tool["arguments"]):
lc = self.tool["arguments"].lc.data[i]
fn = self.tool["arguments"].lc.filename
builder.bindings.lc.add_kv_line_col(len(builder.bindings), lc)
if isinstance(a, dict):
a = copy.copy(a)
if a.get("position"):
a["position"] = [a["position"], i]
else:
a["position"] = [0, i]
builder.bindings.append(a)
elif ("$(" in a) or ("${" in a):
cm = CommentedMap((
("position", [0, i]),
("valueFrom", a)
))
cm.lc.add_kv_line_col("valueFrom", lc)
cm.lc.filename = fn
builder.bindings.append(cm)
else:
cm = CommentedMap((
("position", [0, i]),
("datum", a)
))
cm.lc.add_kv_line_col("datum", lc)
cm.lc.filename = fn
builder.bindings.append(cm)
builder.bindings.sort(key=lambda a: a["position"])
builder.resources = self.evalResources(builder, kwargs)
return builder
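# Ordering note: baseCommand entries are bound at position [-1000000, n],
# so after the sort on "position" above they always come before arguments
# (default position [0, i]) and, in practice, before any input bindings.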
def evalResources(self, builder, kwargs):
# type: (Builder, Dict[AnyStr, Any]) -> Dict[Text, Union[int, Text]]
resourceReq, _ = self.get_requirement("ResourceRequirement")
if resourceReq is None:
resourceReq = {}
request = {
"coresMin": 1,
"coresMax": 1,
"ramMin": 1024,
"ramMax": 1024,
"tmpdirMin": 1024,
"tmpdirMax": 1024,
"outdirMin": 1024,
"outdirMax": 1024
}
for a in ("cores", "ram", "tmpdir", "outdir"):
mn = None
mx = None
if resourceReq.get(a + "Min"):
mn = builder.do_eval(resourceReq[a + "Min"])
if resourceReq.get(a + "Max"):
mx = builder.do_eval(resourceReq[a + "Max"])
if mn is None:
mn = mx
elif mx is None:
mx = mn
if mn:
request[a + "Min"] = mn
request[a + "Max"] = mx
if kwargs.get("select_resources"):
return kwargs["select_resources"](request)
else:
return {
"cores": request["coresMin"],
"ram": request["ramMin"],
"tmpdirSize": request["tmpdirMin"],
"outdirSize": request["outdirMin"],
}
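# Sketch of the min/max defaulting above: when only one bound is given it
# is mirrored to the other, so a ResourceRequirement of {"coresMin": 4}
# yields request["coresMin"] == request["coresMax"] == 4, and with no
# select_resources callback the returned "cores" is the evaluated minimum.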
def validate_hints(self, avsc_names, hints, strict):
# type: (Any, List[Dict[Text, Any]], bool) -> None
for i, r in enumerate(hints):
sl = SourceLine(hints, i, validate.ValidationException)
with sl:
if avsc_names.get_name(r["class"], "") is not None:
plain_hint = dict((key, r[key]) for key in r if key not in
self.doc_loader.identifiers) # strip identifiers
validate.validate_ex(
avsc_names.get_name(plain_hint["class"], ""),
plain_hint, strict=strict)
else:
_logger.info(sl.makeError(u"Unknown hint %s" % (r["class"])))
def get_requirement(self, feature): # type: (Any) -> Tuple[Any, bool]
return get_feature(self, feature)
def visit(self, op): # type: (Callable[[Dict[Text, Any]], None]) -> None
op(self.tool)
@abc.abstractmethod
def job(self,
job_order, # type: Dict[Text, Text]
output_callbacks, # type: Callable[[Any, Any], Any]
**kwargs # type: Any
):
# type: (...) -> Generator[Any, None, None]
return None
def empty_subtree(dirpath): # type: (Text) -> bool
# Test if a directory tree contains any files (does not count empty
# subdirectories)
for d in os.listdir(dirpath):
d = os.path.join(dirpath, d)
try:
if stat.S_ISDIR(os.stat(d).st_mode):
if empty_subtree(d) is False:
return False
else:
return False
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
return True
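# Usage sketch (hypothetical path): empty_subtree("/tmp/only_dirs") is True
# when the tree holds no entries other than (possibly nested, empty)
# directories; any file or other non-directory entry makes it False.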
_names = set() # type: Set[Text]
def uniquename(stem, names=None): # type: (Text, Set[Text]) -> Text
global _names
if names is None:
names = _names
c = 1
u = stem
while u in names:
c += 1
u = u"%s_%s" % (stem, c)
names.add(u)
return u
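# Usage sketch: repeated stems get a numeric suffix so generated names
# never collide, e.g. with a fresh set `taken`:
#   uniquename("step", taken)  # -> "step"
#   uniquename("step", taken)  # -> "step_2"
#   uniquename("step", taken)  # -> "step_3"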
def nestdir(base, deps):
# type: (Text, Dict[Text, Any]) -> Dict[Text, Any]
dirname = os.path.dirname(base) + "/"
subid = deps["location"]
if subid.startswith(dirname):
s2 = subid[len(dirname):]
sp = s2.split('/')
sp.pop()
while sp:
nx = sp.pop()
deps = {
"class": "Directory",
"basename": nx,
"listing": [deps]
}
return deps
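# Usage sketch (hypothetical locations): a dependency below the base
# document's directory is wrapped in one Directory entry per intermediate
# path segment, e.g.
#   nestdir("file:///work/wf.cwl",
#           {"class": "File", "location": "file:///work/data/sub/in.txt"})
# returns a Directory "data" whose listing holds a Directory "sub" whose
# listing holds the original File.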
def mergedirs(listing):
# type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
r = [] # type: List[Dict[Text, Any]]
ents = {} # type: Dict[Text, Any]
for e in listing:
if e["basename"] not in ents:
ents[e["basename"]] = e
elif e["class"] == "Directory" and e.get("listing"):
ents[e["basename"]].setdefault("listing", []).extend(e["listing"])
for e in ents.itervalues():
if e["class"] == "Directory" and "listing" in e:
e["listing"] = mergedirs(e["listing"])
r.extend(ents.itervalues())
return r
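# Usage sketch: two Directory entries sharing a basename collapse into one
# with their listings concatenated (and merged recursively), e.g.
#   mergedirs([{"class": "Directory", "basename": "d", "listing": [a]},
#              {"class": "Directory", "basename": "d", "listing": [b]}])
# yields a single Directory "d" listing [a, b].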
def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urlparse.urljoin):
# type: (Text, Any, Set[Text], Set[Text], Callable[[Text, Text], Any], Callable[[Text, Text], Text]) -> List[Dict[Text, Text]]
r = [] # type: List[Dict[Text, Text]]
deps = None # type: Dict[Text, Any]
if isinstance(doc, dict):
if "id" in doc:
if doc["id"].startswith("file://"):
df, _ = urlparse.urldefrag(doc["id"])
if base != df:
r.append({
"class": "File",
"location": df
})
base = df
if doc.get("class") in ("File", "Directory") and "location" in urlfields:
u = doc.get("location", doc.get("path"))
if u and not u.startswith("_:"):
deps = {
"class": doc["class"],
"location": urljoin(base, u)
}
if doc["class"] == "Directory" and "listing" in doc:
deps["listing"] = doc["listing"]
if doc["class"] == "File" and "secondaryFiles" in doc:
deps["secondaryFiles"] = doc["secondaryFiles"]
deps = nestdir(base, deps)
r.append(deps)
else:
if doc["class"] == "Directory" and "listing" in doc:
r.extend(scandeps(base, doc["listing"], reffields, urlfields, loadref, urljoin=urljoin))
elif doc["class"] == "File" and "secondaryFiles" in doc:
r.extend(scandeps(base, doc["secondaryFiles"], reffields, urlfields, loadref, urljoin=urljoin))
for k, v in doc.iteritems():
if k in reffields:
for u in aslist(v):
if isinstance(u, dict):
r.extend(scandeps(base, u, reffields, urlfields, loadref, urljoin=urljoin))
else:
sub = loadref(base, u)
subid = urljoin(base, u)
deps = {
"class": "File",
"location": subid
}
sf = scandeps(subid, sub, reffields, urlfields, loadref, urljoin=urljoin)
if sf:
deps["secondaryFiles"] = sf
deps = nestdir(base, deps)
r.append(deps)
elif k in urlfields and k != "location":
for u in aslist(v):
deps = {
"class": "File",
"location": urljoin(base, u)
}
deps = nestdir(base, deps)
r.append(deps)
elif k not in ("listing", "secondaryFiles"):
r.extend(scandeps(base, v, reffields, urlfields, loadref, urljoin=urljoin))
elif isinstance(doc, list):
for d in doc:
r.extend(scandeps(base, d, reffields, urlfields, loadref, urljoin=urljoin))
if r:
normalizeFilesDirs(r)
r = mergedirs(r)
return r
def compute_checksums(fs_access, fileobj):
if "checksum" not in fileobj:
checksum = hashlib.sha1()
with fs_access.open(fileobj["location"], "rb") as f:
contents = f.read(1024 * 1024)
while contents != "":
checksum.update(contents)
contents = f.read(1024 * 1024)
f.seek(0, 2)
filesize = f.tell()
fileobj["checksum"] = "sha1$%s" % checksum.hexdigest()
fileobj["size"] = filesize
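# Usage sketch: compute_checksums streams the file in 1 MiB chunks and
# fills in fileobj["checksum"] as "sha1$<hexdigest>" plus fileobj["size"]
# in bytes; a file object that already has a "checksum" key is untouched.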
|
apache-2.0
|
WorldViews/Spirals
|
YEI/examples/pairing_wireless_devices.py
|
1
|
3016
|
## Pairing the YEI 3-Space Sensor Wireless devices with the YEI 3-Space Sensor
## Dongle devices for a wireless connection with Python 2.7, PySerial 2.6, and
## YEI 3-Space Python API
import threespace_api as ts_api
## If the COM port is not known or the device type is not known for the 3-Space
## Sensor device, we must do a search for the devices. We can do this by calling
## the getComPorts function, which returns a list of COM port information.
## (both known 3-Space Sensor devices and unknown devices)
## getComPorts also has a parameter called filter that takes a mask denoting
## what type of 3-Space Sensor device can be found. If filter is not used, or is
## set to None, all connected 3-Space Sensor devices and unknown devices are found.
## Each COM port entry is a list containing
## (COM port name, friendly name, 3-Space Sensor device type)
## This example makes use of the filter parameter of getComPorts and just
## searches for Wireless devices and Dongle devices.
device_list = ts_api.getComPorts(filter=ts_api.TSS_FIND_DNG|ts_api.TSS_FIND_WL)
## Now go through our known list of 3-Space Sensor devices and create the
## appropriate instance by using the devices' type and COM port
dng_device = None
wl_device = None
for device_port in device_list:
com_port, friendly_name, device_type = device_port
if device_type == "DNG":
dng_device = ts_api.TSDongle(com_port=com_port)
elif device_type == "WL":
wl_device = ts_api.TSWLSensor(com_port=com_port)
## If a connection to the COM port fails, None is returned.
if dng_device is not None and wl_device is not None:
## We must first set the Pan ID and Channel of the Dongle device and
## Wireless device to the same value before pairing.
pan_id = 1
channel = 26
dng_device.setWirelessPanID(pan_id)
dng_device.setWirelessChannel(channel)
wl_device.setWirelessPanID(pan_id)
wl_device.setWirelessChannel(channel)
## Now we can start pairing the Dongle device and Wireless device.
## The TSDongle class has a convenience function for pairing with Wireless
## devices.
## This function is setSensorToDongle. It has 2 parameters:
## idx - the index into the Dongle device's wireless table
## hw_id - the serial number of the Wireless device to be paired
dng_device.setSensorToDongle(idx=0, hw_id=wl_device.serial_number)
## serial_number is one of several cached attributes on the 3-Space Sensor
## classes; they hold device information that would be slow and redundant to
## request from the device every time.
## Now we can check if the Wireless device was paired by indexing into the
## TSDongle instance like it was a list.
print(dng_device[0])
## Now commit our wireless settings
dng_device.commitWirelessSettings()
wl_device.commitWirelessSettings()
## Now close the ports.
dng_device.close()
wl_device.close()
|
mit
|
MER-GROUP/intellij-community
|
plugins/hg4idea/testData/bin/hgext/blackbox.py
|
94
|
5258
|
# blackbox.py - log repository events to a file for post-mortem debugging
#
# Copyright 2010 Nicolas Dumazet
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""log repository events to a blackbox for debugging
Logs event information to .hg/blackbox.log to help debug and diagnose problems.
The events that get logged can be configured via the blackbox.track config key.
Examples::
[blackbox]
track = *
[blackbox]
track = command, commandfinish, commandexception, exthook, pythonhook
[blackbox]
track = incoming
[blackbox]
# limit the size of a log file
maxsize = 1.5 MB
# rotate up to N log files when the current one gets too big
maxfiles = 3
"""
from mercurial import util, cmdutil
from mercurial.i18n import _
import errno, os, re
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
lastblackbox = None
def wrapui(ui):
class blackboxui(ui.__class__):
@util.propertycache
def track(self):
return self.configlist('blackbox', 'track', ['*'])
def _openlogfile(self):
def rotate(oldpath, newpath):
try:
os.unlink(newpath)
except OSError, err:
if err.errno != errno.ENOENT:
self.debug("warning: cannot remove '%s': %s\n" %
(newpath, err.strerror))
try:
if newpath:
os.rename(oldpath, newpath)
except OSError, err:
if err.errno != errno.ENOENT:
self.debug("warning: cannot rename '%s' to '%s': %s\n" %
(newpath, oldpath, err.strerror))
fp = self._bbopener('blackbox.log', 'a')
maxsize = self.configbytes('blackbox', 'maxsize', 1048576)
if maxsize > 0:
st = os.fstat(fp.fileno())
if st.st_size >= maxsize:
path = fp.name
fp.close()
maxfiles = self.configint('blackbox', 'maxfiles', 7)
for i in xrange(maxfiles - 1, 1, -1):
rotate(oldpath='%s.%d' % (path, i - 1),
newpath='%s.%d' % (path, i))
rotate(oldpath=path,
newpath=maxfiles > 0 and path + '.1')
fp = self._bbopener('blackbox.log', 'a')
return fp
def log(self, event, *msg, **opts):
global lastblackbox
super(blackboxui, self).log(event, *msg, **opts)
if not '*' in self.track and not event in self.track:
return
if util.safehasattr(self, '_blackbox'):
blackbox = self._blackbox
elif util.safehasattr(self, '_bbopener'):
try:
self._blackbox = self._openlogfile()
except (IOError, OSError), err:
self.debug('warning: cannot write to blackbox.log: %s\n' %
err.strerror)
del self._bbopener
self._blackbox = None
blackbox = self._blackbox
else:
# certain ui instances exist outside the context of
# a repo, so just default to the last blackbox that
# was seen.
blackbox = lastblackbox
if blackbox:
date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
user = util.getuser()
formattedmsg = msg[0] % msg[1:]
try:
blackbox.write('%s %s> %s' % (date, user, formattedmsg))
except IOError, err:
self.debug('warning: cannot write to blackbox.log: %s\n' %
err.strerror)
lastblackbox = blackbox
def setrepo(self, repo):
self._bbopener = repo.opener
ui.__class__ = blackboxui
def uisetup(ui):
wrapui(ui)
def reposetup(ui, repo):
# During 'hg pull' a httppeer repo is created to represent the remote repo.
# It doesn't have a .hg directory to put a blackbox in, so we don't do
# the blackbox setup for it.
if not repo.local():
return
if util.safehasattr(ui, 'setrepo'):
ui.setrepo(repo)
@command('^blackbox',
[('l', 'limit', 10, _('the number of events to show')),
],
_('hg blackbox [OPTION]...'))
def blackbox(ui, repo, *revs, **opts):
'''view the recent repository events
'''
if not os.path.exists(repo.join('blackbox.log')):
return
limit = opts.get('limit')
blackbox = repo.opener('blackbox.log', 'r')
lines = blackbox.read().split('\n')
count = 0
output = []
for line in reversed(lines):
if count >= limit:
break
# count the commands by matching lines like: 2013/01/23 19:13:36 root>
if re.match('^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
count += 1
output.append(line)
ui.status('\n'.join(reversed(output)))
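# Usage sketch (illustrative): once the extension is enabled, the last few
# logged events can be viewed from the command line with e.g.
#   hg blackbox --limit 5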
|
apache-2.0
|
mitdbg/modeldb
|
client/verta/verta/_protos/public/uac/Telemetry_pb2.py
|
2
|
5162
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: uac/Telemetry.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from ..common import CommonService_pb2 as common_dot_CommonService__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='uac/Telemetry.proto',
package='ai.verta.uac',
syntax='proto3',
serialized_options=b'P\001Z:github.com/VertaAI/modeldb/protos/gen/go/protos/public/uac',
serialized_pb=b'\n\x13uac/Telemetry.proto\x12\x0c\x61i.verta.uac\x1a\x1cgoogle/api/annotations.proto\x1a\x1a\x63ommon/CommonService.proto\"f\n\x10\x43ollectTelemetry\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x07metrics\x18\x02 \x03(\x0b\x32\x19.ai.verta.common.KeyValue\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32\x9b\x01\n\x10TelemetryService\x12\x86\x01\n\x10\x63ollectTelemetry\x12\x1e.ai.verta.uac.CollectTelemetry\x1a\'.ai.verta.uac.CollectTelemetry.Response\")\x82\xd3\xe4\x93\x02#\"\x1e/v1/telemetry/collectTelemetry:\x01*B>P\x01Z:github.com/VertaAI/modeldb/protos/gen/go/protos/public/uacb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,common_dot_CommonService__pb2.DESCRIPTOR,])
_COLLECTTELEMETRY_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.uac.CollectTelemetry.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.uac.CollectTelemetry.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=171,
serialized_end=197,
)
_COLLECTTELEMETRY = _descriptor.Descriptor(
name='CollectTelemetry',
full_name='ai.verta.uac.CollectTelemetry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.uac.CollectTelemetry.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='ai.verta.uac.CollectTelemetry.metrics', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COLLECTTELEMETRY_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=197,
)
_COLLECTTELEMETRY_RESPONSE.containing_type = _COLLECTTELEMETRY
_COLLECTTELEMETRY.fields_by_name['metrics'].message_type = common_dot_CommonService__pb2._KEYVALUE
DESCRIPTOR.message_types_by_name['CollectTelemetry'] = _COLLECTTELEMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectTelemetry = _reflection.GeneratedProtocolMessageType('CollectTelemetry', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _COLLECTTELEMETRY_RESPONSE,
'__module__' : 'uac.Telemetry_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.uac.CollectTelemetry.Response)
})
,
'DESCRIPTOR' : _COLLECTTELEMETRY,
'__module__' : 'uac.Telemetry_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.uac.CollectTelemetry)
})
_sym_db.RegisterMessage(CollectTelemetry)
_sym_db.RegisterMessage(CollectTelemetry.Response)
DESCRIPTOR._options = None
_TELEMETRYSERVICE = _descriptor.ServiceDescriptor(
name='TelemetryService',
full_name='ai.verta.uac.TelemetryService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=200,
serialized_end=355,
methods=[
_descriptor.MethodDescriptor(
name='collectTelemetry',
full_name='ai.verta.uac.TelemetryService.collectTelemetry',
index=0,
containing_service=None,
input_type=_COLLECTTELEMETRY,
output_type=_COLLECTTELEMETRY_RESPONSE,
serialized_options=b'\202\323\344\223\002#\"\036/v1/telemetry/collectTelemetry:\001*',
),
])
_sym_db.RegisterServiceDescriptor(_TELEMETRYSERVICE)
DESCRIPTOR.services_by_name['TelemetryService'] = _TELEMETRYSERVICE
# @@protoc_insertion_point(module_scope)
|
mit
|
dundeemt/SoCo
|
soco/music_library.py
|
1
|
21591
|
# -*- coding: utf-8 -*-
"""Music Library access."""
from __future__ import unicode_literals
import logging
from . import discovery
from .data_structures import (
SearchResult,
from_didl_string,
DidlResource,
DidlObject,
DidlMusicAlbum
)
from .exceptions import SoCoUPnPException
from .utils import url_escape_path, really_unicode, camel_to_underscore
_LOG = logging.getLogger(__name__)
# pylint: disable=protected-access
class MusicLibrary(object):
"""The Music Library """
# Key words used when performing searches
SEARCH_TRANSLATION = {'artists': 'A:ARTIST',
'album_artists': 'A:ALBUMARTIST',
'albums': 'A:ALBUM',
'genres': 'A:GENRE',
'composers': 'A:COMPOSER',
'tracks': 'A:TRACKS',
'playlists': 'A:PLAYLISTS',
'share': 'S:',
'sonos_playlists': 'SQ:',
'categories': 'A:'}
# pylint: disable=invalid-name
def __init__(self, soco=None):
"""
Args:
soco (SoCo), optional: A SoCo instance to query for music library
information. If None, or not supplied, a random SoCo will be used.
"""
self.soco = soco if soco is not None else discovery.any_soco()
self.contentDirectory = self.soco.contentDirectory
def _build_album_art_full_uri(self, url):
"""Ensure an Album Art URI is an absolute URI.
:param url: The album art URI
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(('http:', 'https:')):
url = 'http://' + self.soco.ip_address + ':1400' + url
return url
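# Usage sketch (hypothetical URI): a relative art URI such as
# '/getaa?u=track' becomes 'http://<speaker-ip>:1400/getaa?u=track',
# while absolute 'http:'/'https:' URIs pass through unchanged.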
def get_artists(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='artists'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_album_artists(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='album_artists'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['album_artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_albums(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='albums'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['albums'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_genres(self, *args, **kwargs):
""" onvenience method for :py:meth:`get_music_library_information`
with `search_type='genres'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['genres'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_composers(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='composers'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['composers'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_tracks(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='tracks'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['tracks'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_playlists(self, *args, **kwargs):
"""Convenience method for :py:meth:`get_music_library_information`
with `search_type='playlists'`. For details on remaining arguments
refer to the docstring for that method.
NOTE: The playlists that are referred to here are the playlist (files)
imported from the music library, they are not the Sonos playlists.
"""
args = tuple(['playlists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
# pylint: disable=too-many-locals, too-many-arguments,
# too-many-branches
def get_music_library_information(self, search_type, start=0,
max_items=100, full_album_art_uri=False,
search_term=None, subcategories=None,
complete_result=False):
"""Retrieve music information objects from the music library.
This method is the main entry point for getting music information items,
such as tracks and albums, from the music library. It can be used
in a few different ways:
The **search_term** argument performs a fuzzy search on that string in
the results, so e.g. calling::
get_music_library_information('artists', search_term='Metallica')
will perform a fuzzy search for the term 'Metallica' among all the
artists.
Using the **subcategories** argument will jump directly into that
subcategory of the search and return results from there. So, e.g.,
knowing that among the artists there is one called 'Metallica', calling::
get_music_library_information('artists', subcategories=['Metallica'])
will jump directly into the 'Metallica' subcategory and return the
albums associated with Metallica, and::
get_music_library_information('artists', subcategories=['Metallica',
'Black'])
will return the tracks of the album 'Black' by the artist 'Metallica'.
The order of sub category types is: Genres->Artists->Albums->Tracks.
It is also possible to combine the two, to perform a fuzzy search in a
sub category.
The **start**, **max_items** and **complete_result** arguments all
have to do with paging of the results. By default, the searches are
always paged, because there is a limit to how many items we can get at
a time. This paging is exposed to the user with the start and max_items
arguments. So calling::
get_music_library_information('artists', start=0, max_items=100)
get_music_library_information('artists', start=100, max_items=100)
will get the first and next 100 items, respectively. It is also
possible to ask for all the elements at once::
get_music_library_information('artists', complete_result=True)
This will perform the paging internally and simply return all the
items.
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param start: Starting number of returned matches (zero based).
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
size considerations, so check the number returned against the
number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed in the
subcategory
:param subcategories: A list of strings that indicate one or more
subcategories to dive into
:param complete_result: Will disable paging (ignore start and
max_items) and return all results for the search. WARNING! Getting
e.g. all the tracks in a large collection might take some time.
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:raises: :py:class:`SoCoException` upon errors
NOTE: The playlists that are returned with the 'playlists' search, are
the playlists imported from (files in) the music library, they are not
the Sonos playlists.
The information about which searches can be performed and the form
of the query has been gathered from the Janos project:
http://sourceforge.net/projects/janos/ Props to the authors of that
project.
"""
search = self.SEARCH_TRANSLATION[search_type]
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
item_list = []
metadata = {'total_matches': 100000}
while len(item_list) < metadata['total_matches']:
# Change start and max for complete searches
if complete_result:
start, max_items = len(item_list), 100000
# Try and get this batch of results
try:
response, metadata = \
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], search_type, 0, 0, None)
else:
raise exception
# Parse the results
items = from_didl_string(response['Result'])
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self.soco._update_album_art_to_full_uri(item)
# Append the item to the list
item_list.append(item)
# If we are not after the complete result, then stop after one
# iteration
if not complete_result:
break
metadata['search_type'] = search_type
if complete_result:
metadata['number_returned'] = len(item_list)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
def browse(self, ml_item=None, start=0, max_items=100,
full_album_art_uri=False, search_term=None, subcategories=None):
"""Browse (get sub-elements) a music library item.
:param ml_item: The MusicLibraryItem to browse; if left out or passed
None, the items at the base level will be returned
:type ml_item: MusicLibraryItem
:param start: The starting index of the results
:type start: int
:param max_items: The maximum number of items to return
:type max_items: int
:param full_album_art_uri: If the album art URI should include the IP
address
:type full_album_art_uri: bool
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. NOTE: Searching will not work if ml_item is None.
:type search_term: str
:param subcategories: A list of strings that indicate one or more
subcategories to dive into. NOTE: Providing sub categories will
not work if ml_item is None.
:type subcategories: list
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:rtype: :py:class:`~.soco.data_structures.SearchResult`
:raises: AttributeError: If ``ml_item`` has no ``item_id`` attribute
SoCoUPnPException: With ``error_code='701'`` if the item cannot be
browsed
"""
if ml_item is None:
search = 'A:'
else:
search = ml_item.item_id
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
try:
response, metadata = \
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], 'browse', 0, 0, None)
else:
raise exception
metadata['search_type'] = 'browse'
# Parse the results
containers = from_didl_string(response['Result'])
item_list = []
for container in containers:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self.soco._update_album_art_to_full_uri(container)
item_list.append(container)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
# pylint: disable=too-many-arguments
def browse_by_idstring(self, search_type, idstring, start=0,
max_items=100, full_album_art_uri=False):
"""Browse (get sub-elements) a given type.
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param idstring: String ID to search for
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
size considerations, so check the number returned against the
number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A dictionary with metadata for the search, with the
keys 'number_returned', 'update_id', 'total_matches' and an
'item_list' list with the search results.
"""
search = self.SEARCH_TRANSLATION[search_type]
# Check if the string ID already has the type; if so, we do not want to
# add one. Also, imported playlists have a full path to them, so they do
# not require the A:PLAYLISTS part first.
if idstring.startswith(search) or (search_type == 'playlists'):
search = ""
search_item_id = search + idstring
search_uri = "#" + search_item_id
# Not sure about the res protocol. But this seems to work
res = [DidlResource(
uri=search_uri, protocol_info="x-rincon-playlist:*:*:*")]
search_item = DidlObject(
resources=res, title='', parent_id='',
item_id=search_item_id)
# Call the base version
return self.browse(search_item, start, max_items, full_album_art_uri)
def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers.
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search
start: The index of the first item to return
max_items: The maximum number of items to return
Returns:
tuple: (response, metadata) where response is the raw Browse
response and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse([
('ObjectID', search),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
# Get result information
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
return response, metadata
@property
def library_updating(self):
"""True if the music library is in the process of being updated.
:returns: True if the music library is in the process of being updated
:rtype: bool
"""
result = self.contentDirectory.GetShareIndexInProgress()
return result['IsIndexing'] != '0'
def start_library_update(self, album_artist_display_option=''):
"""Start an update of the music library.
If specified, album_artist_display_option changes the album
artist compilation setting (see also album_artist_display_option).
"""
return self.contentDirectory.RefreshShareIndex([
('AlbumArtistDisplayOption', album_artist_display_option),
])
def search_track(self, artist, album=None, track=None,
full_album_art_uri=False):
"""Search for an artist, artist's albums, or specific track.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param track: Track name
:type track: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
subcategories.append(album or '')
# Perform the search
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories, search_term=track,
complete_result=True)
result._metadata['search_type'] = 'search_track'
return result
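# Usage sketch (hypothetical names), matching the docstring above:
#   library.search_track('Metallica')                  # everything by the artist
#   library.search_track('Metallica', 'Black')         # tracks of one album
#   library.search_track('Metallica', 'Black', 'One')  # a specific track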
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get albums for an artist.
:param artist: Artist name
:type artist: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
return result
def get_tracks_for_album(self, artist, album, full_album_art_uri=False):
"""Get tracks for an artist's album.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist, album]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
result._metadata['search_type'] = 'tracks_for_album'
return result
@property
def album_artist_display_option(self):
"""Return the current value of the album artist compilation
setting (see
http://www.sonos.com/support/help/3.4/en/sonos_user_guide/
Chap07_new/Compilation_albums.htm)
This is a string. Possible values:
* "WMP" - Use Album Artists
* "ITUNES" - Use iTunes® Compilations
* "NONE" - Do not group compilations
To change the current setting, call `start_library_update` and
pass the new setting.
"""
result = self.contentDirectory.GetAlbumArtistDisplayOption()
return result['AlbumArtistDisplayOption']
|
mit
|
dpassante/ansible
|
lib/ansible/module_utils/facts/hardware/linux.py
|
23
|
33933
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import errno
import glob
import json
import os
import re
import sys
import time
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from ansible.module_utils._text import to_text
from ansible.module_utils.six import iteritems
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
# import this as a module to ensure we get the same module instance
from ansible.module_utils.facts import timeout
def get_partition_uuid(partname):
try:
uuids = os.listdir("/dev/disk/by-uuid")
except OSError:
return
for uuid in uuids:
dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
if dev == ("/dev/" + partname):
return uuid
return None
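# Usage sketch (hypothetical device): get_partition_uuid('sda1') walks the
# /dev/disk/by-uuid symlinks and returns the matching filesystem UUID, or
# None when no symlink resolves to /dev/sda1.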
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as top-level facts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
# regex used against findmnt output to detect bind mounts
BIND_MOUNT_RE = re.compile(r'.*\]')
# regex used against mtab content to find entries that are bind mounts
MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
# regex used for replacing octal escape sequences
OCTAL_ESCAPE_RE = re.compile(r'\\[0-9]{3}')
def populate(self, collected_facts=None):
hardware_facts = {}
self.module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_NUMERIC': 'C'}
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except timeout.TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_memory_facts(self):
memory_facts = {}
if not os.access("/proc/meminfo", os.R_OK):
return memory_facts
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memory_facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
memory_facts['memory_mb'] = {
'real': {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache': {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap': {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
return memory_facts
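# Parsing sketch: a /proc/meminfo line such as
#   MemTotal:       16315384 kB
# splits on ':' into ('MemTotal', '16315384 kB'); the kB value is reduced
# to MiB with integer division by 1024 before being reported.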
def get_cpu_facts(self, collected_facts=None):
cpu_facts = {}
collected_facts = collected_facts or {}
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
processor_occurence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return cpu_facts
cpu_facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
try:
val = data[1].strip()
except IndexError:
val = ""
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in val:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
# 'ncpus active' is SPARC attribute
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
if 'processor' not in cpu_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(val)
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
if key == 'processor':
processor_occurence += 1
i += 1
elif key == 'physical id':
physid = val
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = val
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(val)
elif key == 'siblings':
cores[coreid] = int(val)
elif key == '# processors':
cpu_facts['processor_cores'] = int(val)
elif key == 'ncpus active':
i = int(val)
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
# The fields for ARM CPUs do not always include 'vendor_id' or 'model name',
# and sometimes includes both 'processor' and 'Processor'.
# The fields for Power CPUs include 'processor' and 'cpu'.
# Always use 'processor' count for ARM and Power systems
if collected_facts.get('ansible_architecture', '').startswith(('armv', 'aarch', 'ppc')):
i = processor_occurence
# FIXME
if collected_facts.get('ansible_architecture') != 's390x':
if xen_paravirt:
cpu_facts['processor_count'] = i
cpu_facts['processor_cores'] = i
cpu_facts['processor_threads_per_core'] = 1
cpu_facts['processor_vcpus'] = i
else:
if sockets:
cpu_facts['processor_count'] = len(sockets)
else:
cpu_facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values and socket_values[0]:
cpu_facts['processor_cores'] = socket_values[0]
else:
cpu_facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
else:
cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores']
cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
cpu_facts['processor_count'] * cpu_facts['processor_cores'])
# if the number of processors available to the module's
# thread cannot be determined, the processor count
# reported by /proc will be the default:
cpu_facts['processor_nproc'] = processor_occurence
try:
cpu_facts['processor_nproc'] = len(
os.sched_getaffinity(0)
)
except AttributeError:
# In Python < 3.3, os.sched_getaffinity() is not available
try:
cmd = get_bin_path('nproc')
except ValueError:
pass
else:
rc, out, _err = self.module.run_command(cmd)
if rc == 0:
cpu_facts['processor_nproc'] = int(out)
return cpu_facts
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
dmi_facts = {}
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf
FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade", "Blade Enclosure",
"Tablet", "Convertible", "Detachable", "IoT Gateway",
"Embedded PC", "Mini PC", "Stick PC"]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag',
'board_name': '/sys/devices/virtual/dmi/id/board_name',
'board_serial': '/sys/devices/virtual/dmi/id/board_serial',
'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor',
'board_version': '/sys/devices/virtual/dmi/id/board_version',
'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag',
'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial',
'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor',
'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor',
}
for (key, path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
dmi_facts['form_factor'] = 'unknown (%s)' % data
else:
dmi_facts[key] = data
else:
dmi_facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_vendor': 'bios-vendor',
'bios_version': 'bios-version',
'board_asset_tag': 'baseboard-asset-tag',
'board_name': 'baseboard-product-name',
'board_serial': 'baseboard-serial-number',
'board_vendor': 'baseboard-manufacturer',
'board_version': 'baseboard-version',
'chassis_asset_tag': 'chassis-asset-tag',
'chassis_serial': 'chassis-serial-number',
'chassis_vendor': 'chassis-manufacturer',
'chassis_version': 'chassis-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
dmi_facts[k] = thisvalue
else:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
def _lsblk_uuid(self):
uuids = {}
lsblk_path = self.module.get_bin_path("lsblk")
if not lsblk_path:
return uuids
rc, out, err = self._run_lsblk(lsblk_path)
if rc != 0:
return uuids
# each line will be in format:
# <devicename><some whitespace><uuid>
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
for lsblk_line in out.splitlines():
if not lsblk_line:
continue
line = lsblk_line.strip()
fields = line.rsplit(None, 1)
if len(fields) < 2:
continue
device_name, uuid = fields[0].strip(), fields[1].strip()
if device_name in uuids:
continue
uuids[device_name] = uuid
return uuids
def _udevadm_uuid(self, device):
# fallback for versions of lsblk <= 2.23 that don't have --paths, see _run_lsblk() above
uuid = 'N/A'
udevadm_path = self.module.get_bin_path('udevadm')
if not udevadm_path:
return uuid
cmd = [udevadm_path, 'info', '--query', 'property', '--name', device]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return uuid
# a snippet of the output of the udevadm command below will be:
# ...
# ID_FS_TYPE=ext4
# ID_FS_USAGE=filesystem
# ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
# ...
m = re.search('ID_FS_UUID=(.*)\n', out)
if m:
uuid = m.group(1)
return uuid
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
return rc, out, err
def _find_bind_mounts(self):
bind_mounts = set()
findmnt_path = self.module.get_bin_path("findmnt")
if not findmnt_path:
return bind_mounts
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
for line in out.splitlines():
fields = line.split()
# fields[0] is the TARGET, fields[1] is the SOURCE
if len(fields) < 2:
continue
# bind mounts will have a [/directory_name] in the SOURCE column
if self.BIND_MOUNT_RE.match(fields[1]):
bind_mounts.add(fields[0])
return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
@staticmethod
def _replace_octal_escapes_helper(match):
# Convert to integer using base8 and then convert to character
return chr(int(match.group()[1:], 8))
def _replace_octal_escapes(self, value):
return self.OCTAL_ESCAPE_RE.sub(self._replace_octal_escapes_helper, value)
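# Usage sketch: mtab encodes special characters octally, so a mount point
# "/mnt/my disk" appears in mtab as "/mnt/my\040disk";
# _replace_octal_escapes("/mnt/my\\040disk") returns "/mnt/my disk".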
def get_mount_info(self, mount, device, uuids):
mount_size = get_mount_size(mount)
# _udevadm_uuid is a fallback for versions of lsblk <= 2.23 that don't have --paths
# see _run_lsblk() above
# https://github.com/ansible/ansible/issues/36077
uuid = uuids.get(device, self._udevadm_uuid(device))
return mount_size, uuid
def get_mount_facts(self):
mounts = []
# gather system lists
bind_mounts = self._find_bind_mounts()
uuids = self._lsblk_uuid()
mtab_entries = self._mtab_entries()
# start threads to query each mount
results = {}
pool = ThreadPool(processes=min(len(mtab_entries), cpu_count()))
maxtime = globals().get('GATHER_TIMEOUT') or timeout.DEFAULT_GATHER_TIMEOUT
for fields in mtab_entries:
# Transform octal escape sequences
fields = [self._replace_octal_escapes(field) for field in fields]
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
if not device.startswith(('/', '\\')) and ':/' not in device or fstype == 'none':
continue
mount_info = {'mount': mount,
'device': device,
'fstype': fstype,
'options': options}
if mount in bind_mounts:
# only add if not already there, we might have a plain /etc/mtab
if not self.MTAB_BIND_MOUNT_RE.match(options):
mount_info['options'] += ",bind"
results[mount] = {'info': mount_info,
'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)),
'timelimit': time.time() + maxtime}
pool.close() # done with new workers, start gc
# wait for workers and get results
while results:
for mount in results:
res = results[mount]['extra']
if res.ready():
if res.successful():
mount_size, uuid = res.get()
if mount_size:
results[mount]['info'].update(mount_size)
results[mount]['info']['uuid'] = uuid or 'N/A'
else:
# give incomplete data
errmsg = to_text(res.get())
self.module.warn("Error prevented getting extra info for mount %s: %s." % (mount, errmsg))
results[mount]['info']['note'] = 'Could not get extra information: %s.' % (errmsg)
mounts.append(results[mount]['info'])
del results[mount]
break
elif time.time() > results[mount]['timelimit']:
results[mount]['info']['note'] = 'Timed out while attempting to get extra information.'
mounts.append(results[mount]['info'])
del results[mount]
break
else:
# avoid cpu churn
time.sleep(0.1)
return {'mounts': mounts}
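# Flow sketch: each mtab entry is probed on a worker thread; results are
# harvested as they become ready, and any mount that exceeds its timelimit
# is reported with a 'note' rather than blocking the whole fact run.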
def get_device_links(self, link_dir):
if not os.path.exists(link_dir):
return {}
try:
retval = collections.defaultdict(set)
for entry in os.listdir(link_dir):
try:
target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
retval[target].add(entry)
except OSError:
continue
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_owners(self):
try:
retval = collections.defaultdict(set)
for path in glob.glob('/sys/block/*/slaves/*'):
elements = path.split('/')
device = elements[3]
target = elements[5]
retval[target].add(device)
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_links(self):
return {
'ids': self.get_device_links('/dev/disk/by-id'),
'uuids': self.get_device_links('/dev/disk/by-uuid'),
'labels': self.get_device_links('/dev/disk/by-label'),
'masters': self.get_all_device_owners(),
}
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return device_facts
devs_wwn = {}
try:
devs_by_id = os.listdir("/dev/disk/by-id")
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith("wwn-"):
try:
wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
links = self.get_all_device_links()
device_facts['device_links'] = links
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
d = {}
d['virtual'] = virtual
d['links'] = {}
for (link_type, link_values) in iteritems(links):
d['links'][link_type] = link_values.get(block, [])
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
sg_inq = self.module.get_bin_path('sg_inq')
# we can get NVMe device's serial number from /sys/block/<name>/device/serial
serial_path = "/sys/block/%s/device/serial" % (block)
if sg_inq:
device = "/dev/%s" % (block)
rc, drivedata, err = self.module.run_command([sg_inq, device])
if rc == 0:
serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
if serial:
d['serial'] = serial.group(1)
else:
serial = get_file_content(serial_path)
if serial:
d['serial'] = serial
for key, test in [('removable', '/removable'),
('support_discard', '/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + r"[p]?\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['links'] = {}
for (link_type, link_values) in iteritems(links):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content(part_sysdir + "/start", 0)
part['sectors'] = get_file_content(part_sysdir + "/size", 0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(r".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
device_facts['devices'][diskname] = d
return device_facts
def get_uptime_facts(self):
uptime_facts = {}
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
return uptime_facts
def _find_mapper_device_name(self, dm_device):
dm_prefix = '/dev/dm-'
mapper_device = dm_device
if dm_device.startswith(dm_prefix):
dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc == 0:
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
lvm_facts = {}
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
vgs_path = self.module.get_bin_path('vgs')
# vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs = {}
if vgs_path:
rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.strip().split(',')
vgs[items[0]] = {'size_g': items[-2],
'free_g': items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
# lvs fields:
# LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.strip().split(',')
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
pvs_path = self.module.get_bin_path('pvs')
# pvs fields: PV VG #Fmt #Attr PSize PFree
pvs = {}
if pvs_path:
rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
for pv_line in pv_lines.splitlines():
items = pv_line.strip().split(',')
pvs[self._find_mapper_device_name(items[0])] = {
'size_g': items[4],
'free_g': items[5],
'vg': items[1]}
lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
return lvm_facts
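    # A hedged sketch of the line parsing above, assuming the field order
    # documented in the comments for
    # `vgs --noheadings --nosuffix --units g --separator ,`
    # (VG,#PV,#LV,#SN,Attr,VSize,VFree); the sample line is invented:
    #
    #   line = '  vg_data,2,5,0,wz--n-,100.00,25.00'
    #   items = line.strip().split(',')
    #   entry = {items[0]: {'size_g': items[-2],   # VSize -> '100.00'
    #                       'free_g': items[-1],   # VFree -> '25.00'
    #                       'num_lvs': items[2],   # #LV   -> '5'
    #                       'num_pvs': items[1]}}  # #PV   -> '2'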
class LinuxHardwareCollector(HardwareCollector):
_platform = 'Linux'
_fact_class = LinuxHardware
required_facts = set(['platform'])
|
gpl-3.0
|
polimediaupv/edx-platform
|
common/test/acceptance/tests/studio/test_studio_library.py
|
11
|
27414
|
"""
Acceptance tests for Content Libraries in Studio
"""
from ddt import ddt, data
from nose.plugins.attrib import attr
from flaky import flaky
from .base_studio_test import StudioLibraryTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.utils import add_component
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.users import LibraryUsersPage
@attr('shard_4')
@ddt
class LibraryEditPageTest(StudioLibraryTest):
"""
Test the functionality of the library edit page.
"""
def setUp(self): # pylint: disable=arguments-differ
"""
Ensure a library exists and navigate to the library edit page.
"""
super(LibraryEditPageTest, self).setUp()
self.lib_page = LibraryEditPage(self.browser, self.library_key)
self.lib_page.visit()
self.lib_page.wait_until_ready()
def test_page_header(self):
"""
Scenario: Ensure that the library's name is displayed in the header and title.
Given I have a library in Studio
And I navigate to Library Page in Studio
Then I can see library name in page header title
And I can see library name in browser page title
"""
self.assertIn(self.library_info['display_name'], self.lib_page.get_header_title())
self.assertIn(self.library_info['display_name'], self.browser.title)
def test_add_duplicate_delete_actions(self):
"""
Scenario: Ensure that we can add an HTML block, duplicate it, then delete the original.
Given I have a library in Studio with no XBlocks
And I navigate to Library Page in Studio
Then there are no XBlocks displayed
When I add Text XBlock
Then one XBlock is displayed
When I duplicate first XBlock
Then two XBlocks are displayed
        And those XBlocks' locators are different
When I delete first XBlock
Then one XBlock is displayed
        And the displayed XBlock is the second one
"""
self.assertEqual(len(self.lib_page.xblocks), 0)
# Create a new block:
add_component(self.lib_page, "html", "Text")
self.assertEqual(len(self.lib_page.xblocks), 1)
first_block_id = self.lib_page.xblocks[0].locator
# Duplicate the block:
self.lib_page.click_duplicate_button(first_block_id)
self.assertEqual(len(self.lib_page.xblocks), 2)
second_block_id = self.lib_page.xblocks[1].locator
self.assertNotEqual(first_block_id, second_block_id)
# Delete the first block:
self.lib_page.click_delete_button(first_block_id, confirm=True)
self.assertEqual(len(self.lib_page.xblocks), 1)
self.assertEqual(self.lib_page.xblocks[0].locator, second_block_id)
def test_no_edit_visibility_button(self):
"""
Scenario: Ensure that library xblocks do not have 'edit visibility' buttons.
Given I have a library in Studio with no XBlocks
And I navigate to Library Page in Studio
When I add Text XBlock
Then one XBlock is displayed
And no 'edit visibility' button is shown
"""
add_component(self.lib_page, "html", "Text")
self.assertFalse(self.lib_page.xblocks[0].has_edit_visibility_button)
def test_add_edit_xblock(self):
"""
Scenario: Ensure that we can add an XBlock, edit it, then see the resulting changes.
Given I have a library in Studio with no XBlocks
And I navigate to Library Page in Studio
Then there are no XBlocks displayed
When I add Multiple Choice XBlock
Then one XBlock is displayed
When I edit first XBlock
And I go to basic tab
        And set its text to a fairly trivial question about Battlestar Galactica
And save XBlock
Then one XBlock is displayed
And first XBlock student content contains at least part of text I set
"""
self.assertEqual(len(self.lib_page.xblocks), 0)
# Create a new problem block:
add_component(self.lib_page, "problem", "Multiple Choice")
self.assertEqual(len(self.lib_page.xblocks), 1)
problem_block = self.lib_page.xblocks[0]
# Edit it:
problem_block.edit()
problem_block.open_basic_tab()
problem_block.set_codemirror_text(
"""
>>Who is "Starbuck"?<<
(x) Kara Thrace
( ) William Adama
( ) Laura Roslin
( ) Lee Adama
( ) Gaius Baltar
"""
)
problem_block.save_settings()
# Check that the save worked:
self.assertEqual(len(self.lib_page.xblocks), 1)
problem_block = self.lib_page.xblocks[0]
self.assertIn("Laura Roslin", problem_block.student_content)
def test_no_discussion_button(self):
"""
Ensure the UI is not loaded for adding discussions.
"""
self.assertFalse(self.browser.find_elements_by_css_selector('span.large-discussion-icon'))
@flaky # TODO fix this, see TNL-2322
def test_library_pagination(self):
"""
Scenario: Ensure that adding several XBlocks to a library results in pagination.
Given that I have a library in Studio with no XBlocks
And I create 10 Multiple Choice XBlocks
Then 10 are displayed.
When I add one more Multiple Choice XBlock
Then 1 XBlock will be displayed
When I delete that XBlock
Then 10 are displayed.
"""
self.assertEqual(len(self.lib_page.xblocks), 0)
for _ in range(10):
add_component(self.lib_page, "problem", "Multiple Choice")
self.assertEqual(len(self.lib_page.xblocks), 10)
add_component(self.lib_page, "problem", "Multiple Choice")
self.assertEqual(len(self.lib_page.xblocks), 1)
self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
self.assertEqual(len(self.lib_page.xblocks), 10)
@data('top', 'bottom')
def test_nav_present_but_disabled(self, position):
"""
Scenario: Ensure that the navigation buttons aren't active when there aren't enough XBlocks.
Given that I have a library in Studio with no XBlocks
The Navigation buttons should be disabled.
When I add a multiple choice problem
The Navigation buttons should be disabled.
"""
self.assertEqual(len(self.lib_page.xblocks), 0)
self.assertTrue(self.lib_page.nav_disabled(position))
add_component(self.lib_page, "problem", "Multiple Choice")
self.assertTrue(self.lib_page.nav_disabled(position))
def test_delete_deletes_only_desired_block(self):
"""
Scenario: Ensure that when deleting XBlock only desired XBlock is deleted
Given that I have a library in Studio with no XBlocks
And I create Blank Common Problem XBlock
And I create Checkboxes XBlock
When I delete Blank Problem XBlock
Then Checkboxes XBlock is not deleted
And Blank Common Problem XBlock is deleted
"""
self.assertEqual(len(self.lib_page.xblocks), 0)
add_component(self.lib_page, "problem", "Blank Common Problem")
add_component(self.lib_page, "problem", "Checkboxes")
self.assertEqual(len(self.lib_page.xblocks), 2)
self.assertIn("Blank Common Problem", self.lib_page.xblocks[0].name)
self.assertIn("Checkboxes", self.lib_page.xblocks[1].name)
self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
self.assertEqual(len(self.lib_page.xblocks), 1)
problem_block = self.lib_page.xblocks[0]
self.assertIn("Checkboxes", problem_block.name)
@attr('shard_4')
@ddt
class LibraryNavigationTest(StudioLibraryTest):
"""
Test common Navigation actions
"""
def setUp(self): # pylint: disable=arguments-differ
"""
Ensure a library exists and navigate to the library edit page.
"""
super(LibraryNavigationTest, self).setUp()
self.lib_page = LibraryEditPage(self.browser, self.library_key)
self.lib_page.visit()
self.lib_page.wait_until_ready()
def populate_library_fixture(self, library_fixture):
"""
        Create four pages' worth of XBlocks, numbered from 1 so that each
        block's name matches the position the user perceives it to occupy.
"""
# pylint: disable=attribute-defined-outside-init
self.blocks = [XBlockFixtureDesc('html', str(i)) for i in xrange(1, 41)]
library_fixture.add_children(*self.blocks)
def test_arbitrary_page_selection(self):
"""
Scenario: I can pick a specific page number of a Library at will.
Given that I have a library in Studio with 40 XBlocks
When I go to the 3rd page
The first XBlock should be the 21st XBlock
When I go to the 4th Page
The first XBlock should be the 31st XBlock
When I go to the 1st page
The first XBlock should be the 1st XBlock
When I go to the 2nd page
The first XBlock should be the 11th XBlock
"""
self.lib_page.go_to_page(3)
self.assertEqual(self.lib_page.xblocks[0].name, '21')
self.lib_page.go_to_page(4)
self.assertEqual(self.lib_page.xblocks[0].name, '31')
self.lib_page.go_to_page(1)
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.lib_page.go_to_page(2)
self.assertEqual(self.lib_page.xblocks[0].name, '11')
def test_bogus_page_selection(self):
"""
Scenario: I can't pick a nonsense page number of a Library
Given that I have a library in Studio with 40 XBlocks
When I attempt to go to the 'a'th page
The input field will be cleared and no change of XBlocks will be made
        When I attempt to visit the -1st page
        The input field will be cleared and no change of XBlocks will be made
        When I attempt to visit the 5th page
The input field will be cleared and no change of XBlocks will be made
When I attempt to visit the 0th page
The input field will be cleared and no change of XBlocks will be made
"""
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.lib_page.go_to_page('a')
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(-1)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(5)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(0)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
@data('top', 'bottom')
def test_nav_buttons(self, position):
"""
Scenario: Ensure that the navigation buttons work.
Given that I have a library in Studio with 40 XBlocks
The previous button should be disabled.
The first XBlock should be the 1st XBlock
Then if I hit the next button
The first XBlock should be the 11th XBlock
Then if I hit the next button
The first XBlock should be the 21st XBlock
Then if I hit the next button
The first XBlock should be the 31st XBlock
And the next button should be disabled
Then if I hit the previous button
The first XBlock should be the 21st XBlock
Then if I hit the previous button
The first XBlock should be the 11th XBlock
Then if I hit the previous button
The first XBlock should be the 1st XBlock
And the previous button should be disabled
"""
# Check forward navigation
self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.lib_page.move_forward(position)
self.assertEqual(self.lib_page.xblocks[0].name, '11')
self.lib_page.move_forward(position)
self.assertEqual(self.lib_page.xblocks[0].name, '21')
self.lib_page.move_forward(position)
self.assertEqual(self.lib_page.xblocks[0].name, '31')
        self.assertTrue(self.lib_page.nav_disabled(position, ['next']))
# Check backward navigation
self.lib_page.move_back(position)
self.assertEqual(self.lib_page.xblocks[0].name, '21')
self.lib_page.move_back(position)
self.assertEqual(self.lib_page.xblocks[0].name, '11')
self.lib_page.move_back(position)
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
def test_library_pagination(self):
"""
Scenario: Ensure that adding several XBlocks to a library results in pagination.
Given that I have a library in Studio with 40 XBlocks
Then 10 are displayed
And the first XBlock will be the 1st one
And I'm on the 1st page
When I add 1 Multiple Choice XBlock
Then 1 XBlock will be displayed
And I'm on the 5th page
The first XBlock will be the newest one
When I delete that XBlock
Then 10 are displayed
And I'm on the 4th page
And the first XBlock is the 31st one
And the last XBlock is the 40th one.
"""
self.assertEqual(len(self.lib_page.xblocks), 10)
self.assertEqual(self.lib_page.get_page_number(), '1')
self.assertEqual(self.lib_page.xblocks[0].name, '1')
add_component(self.lib_page, "problem", "Multiple Choice")
self.assertEqual(len(self.lib_page.xblocks), 1)
self.assertEqual(self.lib_page.get_page_number(), '5')
self.assertEqual(self.lib_page.xblocks[0].name, "Multiple Choice")
self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
self.assertEqual(len(self.lib_page.xblocks), 10)
self.assertEqual(self.lib_page.get_page_number(), '4')
self.assertEqual(self.lib_page.xblocks[0].name, '31')
self.assertEqual(self.lib_page.xblocks[-1].name, '40')
def test_delete_shifts_blocks(self):
"""
Scenario: Ensure that removing an XBlock shifts other blocks back.
Given that I have a library in Studio with 40 XBlocks
Then 10 are displayed
And I will be on the first page
When I delete the third XBlock
There will be 10 displayed
And the first XBlock will be the first one
And the last XBlock will be the 11th one
And I will be on the first page
"""
self.assertEqual(len(self.lib_page.xblocks), 10)
self.assertEqual(self.lib_page.get_page_number(), '1')
self.lib_page.click_delete_button(self.lib_page.xblocks[2].locator, confirm=True)
self.assertEqual(len(self.lib_page.xblocks), 10)
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.assertEqual(self.lib_page.xblocks[-1].name, '11')
self.assertEqual(self.lib_page.get_page_number(), '1')
def test_previews(self):
"""
Scenario: Ensure the user is able to hide previews of XBlocks.
Given that I have a library in Studio with 40 XBlocks
Then previews are visible
And when I click the toggle previews button
Then the previews will not be visible
And when I click the toggle previews button
Then the previews are visible
"""
self.assertTrue(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
self.assertFalse(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
self.assertTrue(self.lib_page.are_previews_showing())
def test_previews_navigation(self):
"""
Scenario: Ensure preview settings persist across navigation.
Given that I have a library in Studio with 40 XBlocks
Then previews are visible
And when I click the toggle previews button
And click the next page button
Then the previews will not be visible
And the first XBlock will be the 11th one
And the last XBlock will be the 20th one
And when I click the toggle previews button
And I click the previous page button
Then the previews will be visible
And the first XBlock will be the first one
        And the last XBlock will be the 10th one
"""
self.assertTrue(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
# Which set of arrows shouldn't matter for this test.
self.lib_page.move_forward('top')
self.assertFalse(self.lib_page.are_previews_showing())
self.assertEqual(self.lib_page.xblocks[0].name, '11')
self.assertEqual(self.lib_page.xblocks[-1].name, '20')
self.lib_page.toggle_previews()
self.lib_page.move_back('top')
self.assertTrue(self.lib_page.are_previews_showing())
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.assertEqual(self.lib_page.xblocks[-1].name, '10')
    def test_preview_state_persistence(self):
"""
Scenario: Ensure preview state persists between page loads.
Given that I have a library in Studio with 40 XBlocks
Then previews are visible
And when I click the toggle previews button
And I revisit the page
Then the previews will not be visible
"""
self.assertTrue(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
self.lib_page.visit()
self.lib_page.wait_until_ready()
self.assertFalse(self.lib_page.are_previews_showing())
def test_preview_add_xblock(self):
"""
Scenario: Ensure previews are shown when adding new blocks, regardless of preview setting.
Given that I have a library in Studio with 40 XBlocks
Then previews are visible
And when I click the toggle previews button
Then the previews will not be visible
And when I add an XBlock
Then I will be on the 5th page
And the XBlock will have loaded a preview
And when I revisit the library
And I go to the 5th page
Then the top XBlock will be the one I added
And it will not have a preview
And when I add an XBlock
Then the XBlock I added will have a preview
And the top XBlock will not have one.
"""
self.assertTrue(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
self.assertFalse(self.lib_page.are_previews_showing())
add_component(self.lib_page, "problem", "Checkboxes")
self.assertEqual(self.lib_page.get_page_number(), '5')
first_added = self.lib_page.xblocks[0]
self.assertIn("Checkboxes", first_added.name)
self.assertFalse(self.lib_page.xblocks[0].is_placeholder())
self.lib_page.visit()
self.lib_page.wait_until_ready()
self.lib_page.go_to_page(5)
self.assertTrue(self.lib_page.xblocks[0].is_placeholder())
add_component(self.lib_page, "problem", "Multiple Choice")
        # The DOM has detached the element since the last assignment
first_added = self.lib_page.xblocks[0]
second_added = self.lib_page.xblocks[1]
self.assertIn("Multiple Choice", second_added.name)
self.assertFalse(second_added.is_placeholder())
self.assertTrue(first_added.is_placeholder())
def test_edit_with_preview(self):
"""
Scenario: Editing an XBlock should show me a preview even if previews are hidden.
Given that I have a library in Studio with 40 XBlocks
Then previews are visible
And when I click the toggle previews button
Then the previews will not be visible
And when I edit the first XBlock
Then the first XBlock will show a preview
And the other XBlocks will still be placeholders
"""
self.assertTrue(self.lib_page.are_previews_showing())
self.lib_page.toggle_previews()
self.assertFalse(self.lib_page.are_previews_showing())
target = self.lib_page.xblocks[0]
target.edit()
target.save_settings()
self.assertFalse(target.is_placeholder())
self.assertTrue(all([xblock.is_placeholder() for xblock in self.lib_page.xblocks[1:]]))
def test_duplicate_xblock_pagination(self):
"""
Scenario: Duplicating an XBlock should not shift the page if the XBlock is not at the end.
Given that I have a library in Studio with 40 XBlocks
When I duplicate the third XBlock
Then the page should not change
And the duplicate XBlock should be there
And it should show a preview
And there should not be more than 10 XBlocks visible.
"""
third_block_id = self.lib_page.xblocks[2].locator
self.lib_page.click_duplicate_button(third_block_id)
self.lib_page.wait_until_ready()
target = self.lib_page.xblocks[3]
self.assertIn('Duplicate', target.name)
self.assertFalse(target.is_placeholder())
self.assertEqual(len(self.lib_page.xblocks), 10)
def test_duplicate_xblock_pagination_end(self):
"""
Scenario: Duplicating an XBlock if it's the last one should bring me to the next page with a preview.
Given that I have a library in Studio with 40 XBlocks
And when I hide previews
And I duplicate the last XBlock
The page should change to page 2
And the duplicate XBlock should be the first XBlock
And it should not be a placeholder
"""
self.lib_page.toggle_previews()
last_block_id = self.lib_page.xblocks[-1].locator
self.lib_page.click_duplicate_button(last_block_id)
self.lib_page.wait_until_ready()
self.assertEqual(self.lib_page.get_page_number(), '2')
target_block = self.lib_page.xblocks[0]
self.assertIn('Duplicate', target_block.name)
self.assertFalse(target_block.is_placeholder())
class LibraryUsersPageTest(StudioLibraryTest):
"""
Test the functionality of the library "Instructor Access" page.
"""
def setUp(self):
super(LibraryUsersPageTest, self).setUp()
# Create a second user for use in these tests:
AutoAuthPage(self.browser, username="second", email="[email protected]", no_login=True).visit()
self.page = LibraryUsersPage(self.browser, self.library_key)
self.page.visit()
def _refresh_page(self):
"""
Reload the page.
"""
self.page = LibraryUsersPage(self.browser, self.library_key)
self.page.visit()
self.page.wait_until_no_loading_indicator()
@flaky # TODO fix this; see TNL-2647
def test_user_management(self):
"""
Scenario: Ensure that we can edit the permissions of users.
Given I have a library in Studio where I am the only admin
assigned (which is the default for a newly-created library)
And I navigate to Library "Instructor Access" Page in Studio
Then there should be one user listed (myself), and I must
not be able to remove myself or my instructor privilege.
When I click Add Instructor
Then I see a form to complete
When I complete the form and submit it
Then I can see the new user is listed as a "User" of the library
When I click to Add Staff permissions to the new user
Then I can see the new user has staff permissions and that I am now
able to promote them to an Admin or remove their staff permissions.
When I click to Add Admin permissions to the new user
Then I can see the new user has admin permissions and that I can now
remove Admin permissions from either user.
"""
def check_is_only_admin(user):
"""
Ensure user is an admin user and cannot be removed.
(There must always be at least one admin user.)
"""
self.assertIn("admin", user.role_label.lower())
self.assertFalse(user.can_promote)
self.assertFalse(user.can_demote)
self.assertFalse(user.can_delete)
self.assertTrue(user.has_no_change_warning)
self.assertIn("Promote another member to Admin to remove your admin rights", user.no_change_warning_text)
self.assertEqual(len(self.page.users), 1)
user = self.page.users[0]
self.assertTrue(user.is_current_user)
check_is_only_admin(user)
# Add a new user:
self.assertTrue(self.page.has_add_button)
self.assertFalse(self.page.new_user_form_visible)
self.page.click_add_button()
self.assertTrue(self.page.new_user_form_visible)
self.page.set_new_user_email('[email protected]')
self.page.click_submit_new_user_form()
# Check the new user's listing:
def get_two_users():
"""
Expect two users to be listed, one being me, and another user.
Returns me, them
"""
users = self.page.users
self.assertEqual(len(users), 2)
self.assertEqual(len([u for u in users if u.is_current_user]), 1)
if users[0].is_current_user:
return users[0], users[1]
else:
return users[1], users[0]
self._refresh_page()
user_me, them = get_two_users()
check_is_only_admin(user_me)
self.assertIn("user", them.role_label.lower())
self.assertTrue(them.can_promote)
self.assertIn("Add Staff Access", them.promote_button_text)
self.assertFalse(them.can_demote)
self.assertTrue(them.can_delete)
self.assertFalse(them.has_no_change_warning)
# Add Staff permissions to the new user:
them.click_promote()
self._refresh_page()
user_me, them = get_two_users()
check_is_only_admin(user_me)
self.assertIn("staff", them.role_label.lower())
self.assertTrue(them.can_promote)
self.assertIn("Add Admin Access", them.promote_button_text)
self.assertTrue(them.can_demote)
self.assertIn("Remove Staff Access", them.demote_button_text)
self.assertTrue(them.can_delete)
self.assertFalse(them.has_no_change_warning)
# Add Admin permissions to the new user:
them.click_promote()
self._refresh_page()
user_me, them = get_two_users()
self.assertIn("admin", user_me.role_label.lower())
self.assertFalse(user_me.can_promote)
self.assertTrue(user_me.can_demote)
self.assertTrue(user_me.can_delete)
self.assertFalse(user_me.has_no_change_warning)
self.assertIn("admin", them.role_label.lower())
self.assertFalse(them.can_promote)
self.assertTrue(them.can_demote)
self.assertIn("Remove Admin Access", them.demote_button_text)
self.assertTrue(them.can_delete)
self.assertFalse(them.has_no_change_warning)
# Delete the new user:
them.click_delete()
self._refresh_page()
self.assertEqual(len(self.page.users), 1)
user = self.page.users[0]
self.assertTrue(user.is_current_user)
|
agpl-3.0
|
moijes12/oh-mainline
|
vendor/packages/python-social-auth/social/backends/evernote.py
|
61
|
2704
|
"""
Evernote OAuth1 backend (with sandbox mode support), docs at:
http://psa.matiasaguirre.net/docs/backends/evernote.html
"""
from requests import HTTPError
from social.exceptions import AuthCanceled
from social.backends.oauth import BaseOAuth1
class EvernoteOAuth(BaseOAuth1):
"""
Evernote OAuth authentication backend.
Possible Values:
{'edam_expires': ['1367525289541'],
'edam_noteStoreUrl': [
'https://sandbox.evernote.com/shard/s1/notestore'
],
'edam_shard': ['s1'],
'edam_userId': ['123841'],
'edam_webApiUrlPrefix': ['https://sandbox.evernote.com/shard/s1/'],
'oauth_token': [
'S=s1:U=1e3c1:E=13e66dbee45:C=1370f2ac245:P=185:A=my_user:' \
'H=411443c5e8b20f8718ed382a19d4ae38'
]}
"""
name = 'evernote'
ID_KEY = 'edam_userId'
AUTHORIZATION_URL = 'https://www.evernote.com/OAuth.action'
REQUEST_TOKEN_URL = 'https://www.evernote.com/oauth'
ACCESS_TOKEN_URL = 'https://www.evernote.com/oauth'
EXTRA_DATA = [
('access_token', 'access_token'),
('oauth_token', 'oauth_token'),
('edam_noteStoreUrl', 'store_url'),
('edam_expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Evernote account"""
return {'username': response['edam_userId'],
'email': ''}
def access_token(self, token):
"""Return request for access token value"""
try:
return self.get_querystring(self.ACCESS_TOKEN_URL,
auth=self.oauth_auth(token))
except HTTPError as err:
# Evernote returns a 401 error when AuthCanceled
if err.response.status_code == 401:
raise AuthCanceled(self)
else:
raise
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
data = super(EvernoteOAuth, self).extra_data(user, uid, response,
details, *args, **kwargs)
        # Evernote returns the expiration timestamp in milliseconds, so it
        # needs to be normalized to seconds.
if 'expires' in data:
data['expires'] = int(data['expires']) / 1000
return data
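    # The normalization above in one line, using the sample edam_expires
    # value from the class docstring:
    #
    #   int('1367525289541') / 1000  ->  1367525289 seconds since the epoch
    #                                    (integer division under Python 2)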
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
return access_token.copy()
class EvernoteSandboxOAuth(EvernoteOAuth):
name = 'evernote-sandbox'
AUTHORIZATION_URL = 'https://sandbox.evernote.com/OAuth.action'
REQUEST_TOKEN_URL = 'https://sandbox.evernote.com/oauth'
ACCESS_TOKEN_URL = 'https://sandbox.evernote.com/oauth'
|
agpl-3.0
|
CARocha/sitioreddes
|
sitioreddes/urls.py
|
1
|
2187
|
from django.conf.urls import patterns, include, url
from settings import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
from noticias.feeds import CategoriaFeed
from noticias.feeds import NoticiasFeed
urlpatterns = patterns('',
url(r'^$', 'noticias.views.index', name='index'),
url(r'^envivos/$', 'noticias.views.envivo_lista', name='envivo'),
url(r'^noticias/', include('noticias.urls')),
url(r'^publicaciones/', include('publicaciones.urls')),
url(r'^multimedias/', include('multimedia.urls')),
url(r'^eventos/', include('eventos.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^search/', include('googlesearch.urls')),
url(r'^ver_mapa_completo_dos/$', 'noticias.views.mapa_completo_dos', name='mapa-completo-dos'),
url(r'^ver_mapa_completo/$', 'noticias.views.mapa_completo', name='mapa-completo'),
url(r'^mapas/$', 'noticias.views.mapa', name='mapas'),
url(r'^lista/socios/$', 'noticias.views.socios', name='lista-socios'),
url(r'^socios/(?P<id>\d+)/$', 'noticias.views.ficha_socio', name='ficha-socio'),
url(r'^socios_pais/(?P<id_pais>\d+)/$', 'noticias.views.socios_pais', name='socios_pais'),
url(r'^posicion_mapa/$', 'noticias.views.mapa_pais', name="posicion_mapa"),
url(r'^recursos/$', 'noticias.views.recursos', name='recursos'),
url('^pages/', include('django.contrib.flatpages.urls')),
url(r'^contactenos/$', 'noticias.views.contacto', name='contactenos'),
url(r'^contacto_ajax/$', 'noticias.views.contacto_ajax', name='contacto_ajax'),
url(r'^captcha/', include('captcha.urls')),
url(r'^feeds/$', NoticiasFeed(), name='noticias_feeds_noticias'),
url(r'^categories/(?P<slug>[-\w]+)/$', CategoriaFeed(), name='noticias_feeds_categoria'),
)
urlpatterns += staticfiles_urlpatterns()
if DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT}),
)
|
mit
|
Mozta/pagina-diagnostijuego
|
venv/lib/python2.7/site-packages/django/contrib/auth/password_validation.py
|
57
|
7538
|
from __future__ import unicode_literals
import gzip
import os
import re
from difflib import SequenceMatcher
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.six import string_types, text_type
from django.utils.translation import ugettext as _, ungettext
@lru_cache.lru_cache(maxsize=None)
def get_default_password_validators():
return get_password_validators(settings.AUTH_PASSWORD_VALIDATORS)
def get_password_validators(validator_config):
validators = []
for validator in validator_config:
try:
klass = import_string(validator['NAME'])
except ImportError:
msg = "The module in NAME could not be imported: %s. Check your AUTH_PASSWORD_VALIDATORS setting."
raise ImproperlyConfigured(msg % validator['NAME'])
validators.append(klass(**validator.get('OPTIONS', {})))
return validators
def validate_password(password, user=None, password_validators=None):
"""
Validate whether the password meets all validator requirements.
If the password is valid, return ``None``.
If the password is invalid, raise ValidationError with all error messages.
"""
errors = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
try:
validator.validate(password, user)
except ValidationError as error:
errors.append(error)
if errors:
raise ValidationError(errors)
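# Typical call-site usage of validate_password(), e.g. from a form's clean
# method (a sketch; `password`, `user` and `form` come from the caller):
#
#   from django.core.exceptions import ValidationError
#   from django.contrib.auth.password_validation import validate_password
#
#   try:
#       validate_password(password, user=user)
#   except ValidationError as exc:
#       # exc.messages aggregates every failing validator's message.
#       form.add_error('password', exc)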
def password_changed(password, user=None, password_validators=None):
"""
Inform all validators that have implemented a password_changed() method
that the password has been changed.
"""
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
password_changed = getattr(validator, 'password_changed', lambda *a: None)
password_changed(password, user)
def password_validators_help_texts(password_validators=None):
"""
Return a list of all help texts of all configured validators.
"""
help_texts = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
help_texts.append(validator.get_help_text())
return help_texts
def _password_validators_help_text_html(password_validators=None):
"""
Return an HTML string with all help texts of all configured validators
in an <ul>.
"""
help_texts = password_validators_help_texts(password_validators)
help_items = [format_html('<li>{}</li>', help_text) for help_text in help_texts]
return '<ul>%s</ul>' % ''.join(help_items) if help_items else ''
password_validators_help_text_html = lazy(_password_validators_help_text_html, text_type)
class MinimumLengthValidator(object):
"""
Validate whether the password is of a minimum length.
"""
def __init__(self, min_length=8):
self.min_length = min_length
def validate(self, password, user=None):
if len(password) < self.min_length:
raise ValidationError(
ungettext(
"This password is too short. It must contain at least %(min_length)d character.",
"This password is too short. It must contain at least %(min_length)d characters.",
self.min_length
),
code='password_too_short',
params={'min_length': self.min_length},
)
def get_help_text(self):
return ungettext(
"Your password must contain at least %(min_length)d character.",
"Your password must contain at least %(min_length)d characters.",
self.min_length
) % {'min_length': self.min_length}
class UserAttributeSimilarityValidator(object):
"""
Validate whether the password is sufficiently different from the user's
attributes.
If no specific attributes are provided, look at a sensible list of
    defaults. Attributes that don't exist are ignored. The comparison is made
    not only against the full attribute value but also against its components,
    so that, for example, a password is validated against each part of an
    email address as well as the full address.
"""
DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')
def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7):
self.user_attributes = user_attributes
self.max_similarity = max_similarity
def validate(self, password, user=None):
if not user:
return
for attribute_name in self.user_attributes:
value = getattr(user, attribute_name, None)
if not value or not isinstance(value, string_types):
continue
            value_parts = re.split(r'\W+', value) + [value]
for value_part in value_parts:
if SequenceMatcher(a=password.lower(), b=value_part.lower()).quick_ratio() > self.max_similarity:
verbose_name = force_text(user._meta.get_field(attribute_name).verbose_name)
raise ValidationError(
_("The password is too similar to the %(verbose_name)s."),
code='password_too_similar',
params={'verbose_name': verbose_name},
)
def get_help_text(self):
return _("Your password can't be too similar to your other personal information.")
class CommonPasswordValidator(object):
"""
Validate whether the password is a common password.
The password is rejected if it occurs in a provided list, which may be gzipped.
The list Django ships with contains 1000 common passwords, created by Mark Burnett:
https://xato.net/passwords/more-top-worst-passwords/
"""
DEFAULT_PASSWORD_LIST_PATH = os.path.join(
os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords.txt.gz'
)
def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
try:
common_passwords_lines = gzip.open(password_list_path).read().decode('utf-8').splitlines()
except IOError:
with open(password_list_path) as f:
common_passwords_lines = f.readlines()
self.passwords = {p.strip() for p in common_passwords_lines}
def validate(self, password, user=None):
if password.lower().strip() in self.passwords:
raise ValidationError(
_("This password is too common."),
code='password_too_common',
)
def get_help_text(self):
return _("Your password can't be a commonly used password.")
class NumericPasswordValidator(object):
"""
    Validate that the password is not entirely numeric.
"""
def validate(self, password, user=None):
if password.isdigit():
raise ValidationError(
_("This password is entirely numeric."),
code='password_entirely_numeric',
)
def get_help_text(self):
return _("Your password can't be entirely numeric.")
|
gpl-3.0
|
akashsinghal/Speech-Memorization-App
|
speech/Swift/Speech-gRPC-Streaming/env/lib/python3.6/site-packages/pip/_vendor/requests/compat.py
|
327
|
1687
|
# -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
# try:
# import simplejson as json
# except (ImportError, SyntaxError):
# # simplejson does not support Python 3.2, it throws a SyntaxError
# # because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
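# Downstream code imports these shims instead of branching on the interpreter
# itself, e.g. (sketch, using this file's vendored import path):
#
#   from pip._vendor.requests.compat import urlparse, builtin_str
#   parts = urlparse('https://example.com/path?q=1')
#   parts.netloc  # -> 'example.com' on both Python 2 and 3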
|
apache-2.0
|
Brainiq7/Ananse
|
ananse_dl/extractor/tenplay.py
|
31
|
3423
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TenPlayIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ten(play)?\.com\.au/.+'
_TEST = {
'url': 'http://tenplay.com.au/ten-insider/extra/season-2013/tenplay-tv-your-way',
'info_dict': {
'id': '2695695426001',
'ext': 'flv',
'title': 'TENplay: TV your way',
'description': 'Welcome to a new TV experience. Enjoy a taste of the TENplay benefits.',
'timestamp': 1380150606.889,
'upload_date': '20130925',
'uploader': 'TENplay',
},
'params': {
'skip_download': True, # Requires rtmpdump
}
}
_video_fields = [
"id", "name", "shortDescription", "longDescription", "creationDate",
"publishedDate", "lastModifiedDate", "customFields", "videoStillURL",
"thumbnailURL", "referenceId", "length", "playsTotal",
"playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"]
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
video_id = self._html_search_regex(
r'videoID: "(\d+?)"', webpage, 'video_id')
api_token = self._html_search_regex(
r'apiToken: "([a-zA-Z0-9-_\.]+?)"', webpage, 'api_token')
title = self._html_search_regex(
r'<meta property="og:title" content="\s*(.*?)\s*"\s*/?\s*>',
webpage, 'title')
json = self._download_json('https://api.brightcove.com/services/library?command=find_video_by_id&video_id=%s&token=%s&video_fields=%s' % (video_id, api_token, ','.join(self._video_fields)), title)
formats = []
for rendition in json['renditions']:
url = rendition['remoteUrl'] or rendition['url']
protocol = 'rtmp' if url.startswith('rtmp') else 'http'
ext = 'flv' if protocol == 'rtmp' else rendition['videoContainer'].lower()
if protocol == 'rtmp':
url = url.replace('&mp4:', '')
formats.append({
'format_id': '_'.join(['rtmp', rendition['videoContainer'].lower(), rendition['videoCodec'].lower()]),
'width': rendition['frameWidth'],
'height': rendition['frameHeight'],
'tbr': rendition['encodingRate'] / 1024,
'filesize': rendition['size'],
'protocol': protocol,
'ext': ext,
'vcodec': rendition['videoCodec'].lower(),
'container': rendition['videoContainer'].lower(),
'url': url,
})
return {
'id': video_id,
'display_id': json['referenceId'],
'title': json['name'],
'description': json['shortDescription'] or json['longDescription'],
'formats': formats,
'thumbnails': [{
'url': json['videoStillURL']
}, {
'url': json['thumbnailURL']
}],
'thumbnail': json['videoStillURL'],
'duration': json['length'] / 1000,
'timestamp': float(json['creationDate']) / 1000,
'uploader': json['customFields']['production_company_distributor'] if 'production_company_distributor' in json['customFields'] else 'TENplay',
'view_count': json['playsTotal']
}
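    # The protocol/container selection from the renditions loop above,
    # isolated as a sketch (the sample URL is invented):
    #
    #   url = 'rtmp://cp.example.net/ondemand/&mp4:clip.mp4'
    #   protocol = 'rtmp' if url.startswith('rtmp') else 'http'  # -> 'rtmp'
    #   ext = 'flv' if protocol == 'rtmp' else 'mp4'
    #   if protocol == 'rtmp':
    #       url = url.replace('&mp4:', '')
    #   # -> 'rtmp://cp.example.net/ondemand/clip.mp4'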
|
unlicense
|
OptiPop/external_chromium_org
|
tools/telemetry/telemetry/timeline/thread.py
|
33
|
9408
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.async_slice as async_slice_module
import telemetry.timeline.event_container as event_container
import telemetry.timeline.flow_event as flow_event_module
import telemetry.timeline.sample as sample_module
import telemetry.timeline.slice as slice_module
class Thread(event_container.TimelineEventContainer):
''' A Thread stores all the trace events collected for a particular
thread. We organize the synchronous slices on a thread by "subrows," where
subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
The asynchronous slices are stored in an AsyncSliceGroup object.
'''
def __init__(self, process, tid):
super(Thread, self).__init__('thread %s' % tid, parent=process)
self.tid = tid
self._async_slices = []
self._flow_events = []
self._samples = []
self._toplevel_slices = []
self._all_slices = []
# State only valid during import.
self._open_slices = []
self._newly_added_slices = []
@property
def toplevel_slices(self):
return self._toplevel_slices
@property
def all_slices(self):
return self._all_slices
@property
def samples(self):
return self._samples
@property
def async_slices(self):
return self._async_slices
@property
def open_slice_count(self):
return len(self._open_slices)
def IterChildContainers(self):
return
yield # pylint: disable=W0101
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
if event_type_predicate(slice_module.Slice):
for s in self._newly_added_slices:
if event_predicate(s):
yield s
for s in self._all_slices:
if event_predicate(s):
yield s
if event_type_predicate(async_slice_module.AsyncSlice):
for async_slice in self._async_slices:
if event_predicate(async_slice):
yield async_slice
for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
if event_predicate(sub_slice):
yield sub_slice
if event_type_predicate(flow_event_module.FlowEvent):
for flow_event in self._flow_events:
if event_predicate(flow_event):
yield flow_event
if event_type_predicate(sample_module.Sample):
for sample in self._samples:
if event_predicate(sample):
yield sample
def AddSample(self, category, name, timestamp, args=None):
if len(self._samples) and timestamp < self._samples[-1].start:
raise ValueError(
'Samples must be added in increasing timestamp order')
sample = sample_module.Sample(self,
category, name, timestamp, args=args)
self._samples.append(sample)
def AddAsyncSlice(self, async_slice):
self._async_slices.append(async_slice)
def AddFlowEvent(self, flow_event):
self._flow_events.append(flow_event)
def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
args=None):
"""Opens a new slice for the thread.
Calls to beginSlice and endSlice must be made with
    monotonically non-decreasing timestamps.
* category: Category to which the slice belongs.
* name: Name of the slice to add.
    * timestamp: The timestamp of the slice, in milliseconds.
* thread_timestamp: Thread specific clock (scheduled) timestamp of the
slice, in milliseconds.
    * args: Arguments associated with the slice.
Returns newly opened slice
"""
if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
raise ValueError(
'Slices must be added in increasing timestamp order')
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self._open_slices.append(new_slice)
new_slice.did_not_finish = True
self.PushSlice(new_slice)
return new_slice
def EndSlice(self, end_timestamp, end_thread_timestamp=None):
""" Ends the last begun slice in this group and pushes it onto the slice
array.
* end_timestamp: Timestamp when the slice ended in milliseconds
* end_thread_timestamp: Timestamp when the scheduled time of the slice ended
in milliseconds
    Returns the completed slice.
"""
if not len(self._open_slices):
raise ValueError(
'EndSlice called without an open slice')
curr_slice = self._open_slices.pop()
if end_timestamp < curr_slice.start:
raise ValueError(
'Slice %s end time is before its start.' % curr_slice.name)
curr_slice.duration = end_timestamp - curr_slice.start
    if end_thread_timestamp is not None:
      if curr_slice.thread_start is None:
raise ValueError(
'EndSlice with thread_timestamp called on open slice without ' +
'thread_timestamp')
curr_slice.thread_duration = (end_thread_timestamp -
curr_slice.thread_start)
curr_slice.did_not_finish = False
return curr_slice
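  # Paired usage of BeginSlice()/EndSlice(), assuming `thread` is a Thread
  # from an imported trace (sketch; all timestamps are in milliseconds):
  #
  #   thread.BeginSlice('cat', 'outer', 10.0, thread_timestamp=9.0)
  #   thread.BeginSlice('cat', 'inner', 11.0)
  #   inner = thread.EndSlice(12.0)        # inner.duration == 1.0
  #   outer = thread.EndSlice(15.0, 13.0)  # outer.duration == 5.0,
  #                                        # outer.thread_duration == 4.0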
def PushCompleteSlice(self, category, name, timestamp, duration,
thread_timestamp, thread_duration, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
    if duration is None:
new_slice.did_not_finish = True
else:
new_slice.duration = duration
new_slice.thread_duration = thread_duration
self.PushSlice(new_slice)
return new_slice
def PushSlice(self, new_slice):
self._newly_added_slices.append(new_slice)
return new_slice
def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
for s in self._newly_added_slices:
if s.did_not_finish:
s.duration = max_timestamp - s.start
assert s.duration >= 0
        if s.thread_start is not None:
s.thread_duration = max_thread_timestamp - s.thread_start
assert s.thread_duration >= 0
self._open_slices = []
def IsTimestampValidForBeginOrEnd(self, timestamp):
if not len(self._open_slices):
return True
return timestamp >= self._open_slices[-1].start
def FinalizeImport(self):
self._BuildSliceSubRows()
def _BuildSliceSubRows(self):
'''This function works by walking through slices by start time.
The basic idea here is to insert each slice as deep into the subrow
list as it can go such that every subslice is fully contained by its
parent slice.
Visually, if we start with this:
0: [ a ]
1: [ b ]
2: [c][d]
To place this slice:
[e]
    We first check row 2's last item, [d]. [e] won't fit into [d] (they don't
    even intersect). So we go to row 1. That gives us [b], and [e] won't fit
into that either. So, we go to row 0 and its last slice, [a]. That can
completely contain [e], so that means we should add [e] as a subslice
of [a]. That puts it on row 1, yielding:
0: [ a ]
1: [ b ][e]
2: [c][d]
If we then get this slice:
[f]
We do the same deepest-to-shallowest walk of the subrows trying to fit
it. This time, it doesn't fit in any open slice. So, we simply append
it to row 0 (a root slice):
0: [ a ] [f]
1: [ b ][e]
'''
def CompareSlices(s1, s2):
if s1.start == s2.start:
# Break ties by having the slice with the greatest
# end timestamp come first.
return cmp(s2.end, s1.end)
return cmp(s1.start, s2.start)
assert len(self._toplevel_slices) == 0
assert len(self._all_slices) == 0
if not len(self._newly_added_slices):
return
self._all_slices.extend(self._newly_added_slices)
sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
root_slice = sorted_slices[0]
self._toplevel_slices.append(root_slice)
for s in sorted_slices[1:]:
if not self._AddSliceIfBounds(root_slice, s):
root_slice = s
self._toplevel_slices.append(root_slice)
self._newly_added_slices = []
def _AddSliceIfBounds(self, root, child):
    ''' Adds a child slice to a root slice at its proper row.
Return False if the child slice is not in the bounds
of the root slice.
Because we know that the start time of child is >= the start time
of all other slices seen so far, we can just check the last slice
of each row for bounding.
'''
# The source trace data is in microseconds but we store it as milliseconds
# in floating-point. Since we can't represent micros as millis perfectly,
# two end=start+duration combos that should be the same will be slightly
# different. Round back to micros to ensure equality below.
child_end_micros = round(child.end * 1000)
root_end_micros = round(root.end * 1000)
if child.start >= root.start and child_end_micros <= root_end_micros:
if len(root.sub_slices) > 0:
if self._AddSliceIfBounds(root.sub_slices[-1], child):
return True
child.parent_slice = root
root.AddSubSlice(child)
return True
return False
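  # Why the micros rounding above matters: millisecond floats don't compose
  # exactly, so end == start + duration can miss by one ulp. Standalone demo:
  #
  #   start, duration = 0.1, 0.2
  #   (start + duration) * 1000         # -> 300.00000000000006
  #   round((start + duration) * 1000)  # -> 300.0, comparable again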
|
bsd-3-clause
|
exploreodoo/datStruct
|
odoo/openerp/tools/convert.py
|
205
|
41282
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cStringIO
import csv
import logging
import os.path
import pickle
import re
import sys
# for eval context:
import time
import openerp
import openerp.release
import openerp.workflow
from yaml_import import convert_yaml_import
import assertion_report
_logger = logging.getLogger(__name__)
try:
import pytz
except ImportError:
_logger.warning('could not find pytz library, please install it')
class pytzclass(object):
all_timezones=[]
pytz=pytzclass()
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
import misc
from config import config
from translate import _
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import unquote
from openerp import SUPERUSER_ID
# Import of XML records requires the unsafe eval as well,
# almost everywhere, which is ok because it supposedly comes
# from trusted data, but at least we make it obvious now.
unsafe_eval = eval
from safe_eval import safe_eval as eval
class ParseError(Exception):
def __init__(self, msg, text, filename, lineno):
self.msg = msg
self.text = text
self.filename = filename
self.lineno = lineno
def __str__(self):
return '"%s" while parsing %s:%s, near\n%s' \
% (self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
idref2 = dict(idref,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=openerp.release.major_version,
ref=_ref(self, cr),
pytz=pytz)
if len(model_str):
idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
    As a convention we'll surround multiple roots with a container "data" element, to be
ignored later when parsing.
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
for child in node:
data_node.append(child)
node.append(data_node)
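# The lxml detail behind the loop above: appending an element to a new parent
# *moves* it, so every child ends up under the single <data> wrapper.
# Standalone sketch (list() snapshots the children before the moves):
#
#   from lxml import etree
#   field = etree.fromstring('<field><tree/><form/></field>')
#   data = etree.Element('data')
#   for child in list(field):
#       data.append(child)
#   field.append(data)
#   etree.tostring(field)  # -> '<field><data><tree/><form/></data></field>'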
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model', '').encode('utf-8')
if node.get('search'):
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'id').encode('utf-8')
f_name = node.get("name",'').encode('utf-8')
idref2 = {}
if f_search:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
q = unsafe_eval(f_search, idref2)
ids = pool[f_model].search(cr, uid, q)
if f_use != 'id':
ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
_cols = pool[f_model]._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if a_eval:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logging.getLogger('openerp.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
raise
def _process(s, idref):
        matches = re.finditer(r'[^%]%\((.*?)\)[ds]', s)
done = []
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.append(found)
id = m.groups()[0]
if not id in idref:
idref[id] = self.id_get(cr, id)
s = s.replace(found, str(idref[id]))
s = s.replace('%%', '%') # Quite weird, but it's for (somewhat) backward compatibility's sake
return s
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
if t == 'html':
return _process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
data = node.text
if node.get('file'):
with openerp.tools.file_open(node.get('file'), 'rb') as f:
data = f.read()
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'base64':
return data.encode('base64')
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
# FIXME: should probably be exclusive
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, x)
args = unsafe_eval(a_eval, idref)
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val is not None:
args.append(return_val)
model = pool[node.get('model', '')]
method = node.get('name')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
return node.text
escape_re = re.compile(r'(?<!\\)/')
def escape(x):
return x.replace('\\/', '/')
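# Illustrative example (hypothetical menu name): '/' separates menu path
# components, while '\/' escapes a literal slash inside a component.
#   escape_re.split(r'Sales/Orders\/Quotes') -> ['Sales', 'Orders\\/Quotes']
#   map(escape, ...)                         -> ['Sales', 'Orders/Quotes']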
class xml_import(object):
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
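# Illustrative behaviour of nodeattr2bool: a missing or empty attribute
# yields `default`; the values '0', 'false' and 'off' (any case) yield
# False; any other non-empty value ('1', 'True', ...) yields True.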
def isnoupdate(self, data_node=None):
return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
node_context = node.get("context",'').encode('utf8')
context = {}
for ctx in (data_node_context, node_context):
if ctx:
try:
ctx_res = unsafe_eval(ctx, eval_dict)
if isinstance(context, dict):
context.update(ctx_res)
else:
context = ctx_res
except NameError:
# Some contexts contain references that are only valid at runtime at
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if node_uid:
return self.id_get(cr, node_uid)
return uid
def _test_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, """The ID reference "%s" must contain
at most one dot. Dots are used to refer to IDs of other modules, in the
form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
_logger.error('id: %s is too long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None, mode=None):
d_model = rec.get("model")
d_search = rec.get("search",'').encode('utf-8')
d_id = rec.get("id")
ids = []
if d_search:
idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
try:
ids = self.pool[d_model].search(cr, self.uid, unsafe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
pass
if d_id:
try:
ids.append(self.id_get(cr, d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
pass
if ids:
self.pool[d_model].unlink(cr, self.uid, ids)
def _remove_ir_values(self, cr, name, value, model):
ir_values_obj = self.pool['ir.values']
ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
if ir_value_ids:
ir_values_obj.unlink(cr, self.uid, ir_value_ids)
return True
def _tag_report(self, cr, rec, data_node=None, mode=None):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f,'').encode('utf8')
assert res[dest], "Attribute %s of report is empty!" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
('report_type', 'report_type'), ('parser', 'parser')):
if rec.get(field):
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = eval(rec.get('auto','False'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if not rec.get('menu') or eval(rec.get('menu','False')):
keyword = str(rec.get('keyword', 'client_print_multi'))
value = 'ir.actions.report.xml,'+str(id)
replace = rec.get('replace', True)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
elif self.mode=='update' and eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
value = 'ir.actions.report.xml,'+str(id)
self._remove_ir_values(cr, res['name'], value, res['model'])
return id
def _tag_function(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return
def _tag_url(self, cr, rec, data_node=None, mode=None):
url = rec.get("url",'').encode('utf8')
target = rec.get("target",'').encode('utf8')
name = rec.get("name",'').encode('utf8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
res = {'name': name, 'url': url, 'target':target}
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
def _tag_act_window(self, cr, rec, data_node=None, mode=None):
name = rec.get('name','').encode('utf-8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
view_id = False
if rec.get('view_id'):
view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
domain = rec.get('domain','').encode('utf-8') or '[]'
res_model = rec.get('res_model','').encode('utf-8')
src_model = rec.get('src_model','').encode('utf-8')
view_type = rec.get('view_type','').encode('utf-8') or 'form'
view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
usage = rec.get('usage','').encode('utf-8')
limit = rec.get('limit','').encode('utf-8')
auto_refresh = rec.get('auto_refresh','').encode('utf-8')
uid = self.uid
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
def ref(str_id):
return self.id_get(cr, str_id)
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': type,
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
'uid' : uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
'ref' : ref,
}
context = self.get_context(data_node, rec, eval_context)
try:
domain = unsafe_eval(domain, eval_context)
except NameError:
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if rec.get('multi'):
res['multi'] = eval(rec.get('multi', 'False'))
id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if src_model:
#keyword = 'client_action_relate'
keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
value = 'ir.actions.act_window,'+str(id)
replace = rec.get('replace','') or True
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
if self.mode != 'init':
return
res = {}
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
def _tag_workflow(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = rec.get('model').encode('ascii')
w_ref = rec.get('ref')
if w_ref:
id = self.id_get(cr, w_ref)
else:
number_children = len(rec)
assert number_children > 0,\
"You must define a child node if you don't give a ref"
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
openerp.workflow.trg_validate(
uid, model, id, rec.get('action').encode('ascii'), cr)
#
# Support two types of notation:
# name="Inventory Control/Sending Goods"
# or
# action="action_id"
# parent="parent_id"
#
def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
m_l = map(escape, escape_re.split(rec.get("name",'').encode('utf8')))
values = {'parent_id': False}
if rec.get('parent', False) is False and len(m_l) > 1:
# No parent attribute specified and the menu name has several menu components,
# try to determine the ID of the parent according to menu path
pid = False
res = None
values['name'] = m_l[-1]
m_l = m_l[:-1] # last part is our name, not a parent
for idx, menu_elem in enumerate(m_l):
if pid:
cr.execute('select id from ir_ui_menu where parent_id=%s and name=%s', (pid, menu_elem))
else:
cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (menu_elem,))
res = cr.fetchone()
if res:
pid = res[0]
else:
# the menuitem doesn't exist but we are in a branch (not a leaf)
_logger.warning('No ID found for submenu %s of menu %s!', menu_elem, str(m_l))
pid = self.pool['ir.ui.menu'].create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
if rec.get('parent'):
menu_parent_id = self.id_get(cr, rec.get('parent',''))
else:
# we get here with <menuitem parent="">, explicit clear of parent, or
# if no parent attribute at all but menu name is not a menu path
menu_parent_id = False
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, rec.get('id','')) ]
except ValueError:
res = None
if rec.get('action'):
a_action = rec.get('action','').encode('utf8')
# determine the type of action
action_type, action_id = self.model_id_get(cr, a_action)
action_type = action_type.split('.')[-1] # keep only type part
if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
a_table = 'ir_act_%s' % action_type.replace('act_', '')
cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
resw = cr.fetchone()
if resw:
values['name'] = resw[0]
if not values.get('name'):
# ensure menu has a name
values['name'] = rec_id or '?'
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
if rec_id and pid:
self.idref[rec_id] = int(pid)
if rec.get('action') and pid:
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec)
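# Illustrative sketch: two floats are "equal" when their difference rounds
# to zero at `prec` decimal places, e.g. with the default prec=4:
#   _assert_equals(self, 2.00004, 2.00005) -> True  (diff rounds to -0.0)
#   _assert_equals(self, 2.001, 2.002)     -> False (diff rounds to -0.001)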
def _tag_assert(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
rec_model = rec.get("model",'').encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count")
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
ids = None
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if rec_id:
ids = [self.id_get(cr, rec_id)]
elif rec_src:
q = unsafe_eval(rec_src, eval_dict)
ids = self.pool[rec_model].search(cr, uid, q, context=context)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n' \
% (rec_string, count, len(ids))
_logger.error(msg)
return
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
class d(dict):
def __getitem__(self2, key):
if key in brrec:
return brrec[key]
return dict.__getitem__(self2, key)
globals_dict = d()
globals_dict['floatEqual'] = self._assert_equals
globals_dict['ref'] = ref
globals_dict['_ref'] = ref
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = unsafe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
_logger.error(msg)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _tag_record(self, cr, rec, data_node=None, mode=None):
rec_model = rec.get("model").encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
rec_context = rec.get("context", None)
if rec_context:
rec_context = unsafe_eval(rec_context)
self._test_xml_id(rec_id)
# in update mode, the record won't be updated if the data node explicitly
# opts out using @noupdate="1". A second check will be performed in
# ir.model.data#_update() using the record's ir.model.data `noupdate` field.
if self.isnoupdate(data_node) and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
if '.' in rec_id:
module,rec_id2 = rec_id.split('.')
else:
module = self.module
rec_id2 = rec_id
id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
if id:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = int(id)
return None
elif not self.nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name").encode('utf-8')
f_ref = field.get("ref",'').encode('utf-8')
f_search = field.get("search",'').encode('utf-8')
f_model = field.get("model",'').encode('utf-8')
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'').encode('utf-8') or 'id'
f_val = False
if f_search:
q = unsafe_eval(f_search, self.idref)
assert f_model, 'Define an attribute model="..." in your XML file!'
f_obj = self.pool[f_model]
# browse the objects searched
s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
# column definitions of the "local" object
_fields = self.pool[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [(6, 0, map(lambda x: x[f_use], s))]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(cr, f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(cr, f_ref)
else:
f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
if f_name in model._fields:
if model._fields[f_name].type == 'integer':
f_val = int(f_val)
res[f_name] = f_val
id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
if rec_id:
self.idref[rec_id] = int(id)
if config.get('import_partial'):
cr.commit()
return rec_model, id
def _tag_template(self, cr, el, data_node=None, mode=None):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
record_attrs = {
'id': tpl_id,
'model': 'ir.ui.view',
}
for att in ['forcecreate', 'context']:
if att in el.keys():
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field("qweb", name='type'))
record.append(Field(el.get('priority', "16"), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if el.get('active') in ("True", "False"):
view_id = self.id_get(cr, tpl_id, raise_if_not_found=False)
if mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
if el.attrib.pop('page', None) == 'True':
record.append(Field(name="page", eval="True"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(cr, record, data_node)
def id_get(self, cr, id_str, raise_if_not_found=True):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(cr, id_str, raise_if_not_found)
if res and len(res)>1: res = res[1]
return res
def model_id_get(self, cr, id_str, raise_if_not_found=True):
model_data_obj = self.pool['ir.model.data']
mod = self.module
if '.' not in id_str:
id_str = '%s.%s' % (mod, id_str)
return model_data_obj.xmlid_to_res_model_res_id(
cr, self.uid, id_str,
raise_if_not_found=raise_if_not_found)
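# Illustrative sketch (hypothetical ids): while loading module 'sale',
# id_get(cr, 'menu_root') resolves like id_get(cr, 'sale.menu_root') and
# returns only the database id, whereas model_id_get returns the full
# (model, id) pair, e.g. ('ir.ui.menu', 42).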
def parse(self, de, mode=None):
if de.tag != 'openerp':
raise Exception("Mismatch xml format: root tag must be `openerp`.")
for n in de.findall('./data'):
for rec in n:
if rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, n, mode=mode)
except Exception, e:
self.cr.rollback()
exc_info = sys.exc_info()
raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
self.mode = mode
self.module = module
self.cr = cr
self.idref = idref
self.pool = openerp.registry(cr.dbname)
self.uid = 1
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'workflow': self._tag_workflow,
'report': self._tag_report,
'ir_set': self._tag_ir_set,
'act_window': self._tag_act_window,
'url': self._tag_url,
'assert': self._tag_assert,
}
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
fp = misc.file_open(pathname)
ext = os.path.splitext(filename)[1].lower()
try:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.yml':
convert_yaml_import(cr, module, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
def convert_sql_import(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
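# Illustrative sketch: the file content is naively split on ';', so
#   "CREATE TABLE t (x int); INSERT INTO t VALUES (1);"
# runs as two statements. A ';' inside a string literal would also be
# treated as a separator, so imported .sql files should avoid embedded ones.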
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
'''Import a csv file:
quote: "
delimiter: ,
encoding: utf-8'''
if not idref:
idref={}
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
input = cStringIO.StringIO(csvcontent) #FIXME
reader = csv.reader(input, quotechar='"', delimiter=',')
fields = reader.next()
fname_partial = ""
if config.get('import_partial'):
fname_partial = module + '/'+ fname
if not os.path.isfile(config.get('import_partial')):
pickle.dump({}, file(config.get('import_partial'),'w+'))
else:
data = pickle.load(file(config.get('import_partial')))
if fname_partial in data:
if not data[fname_partial]:
return
else:
for i in range(data[fname_partial]):
reader.next()
if not (mode == 'init' or 'id' in fields):
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
uid = 1
datas = []
for line in reader:
if not (line and any(line)):
continue
try:
datas.append(map(misc.ustr, line))
except Exception:
_logger.error("Cannot import the line: %s", line)
registry = openerp.registry(cr.dbname)
result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
data[fname_partial] = 0
pickle.dump(data, file(config.get('import_partial'),'wb'))
cr.commit()
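# Illustrative sketch of the partial-import bookkeeping above: with the
# `import_partial` option set, the pickle file maps "module/file.csv" to the
# number of rows already imported, so a restarted load can skip them; the
# value is reset to 0 once the file has been fully processed.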
#
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception:
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
idref={}
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
obj.parse(doc.getroot(), mode=mode)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gpl-2.0
|
SimVascular/VTK
|
ThirdParty/Twisted/twisted/trial/test/test_doctest.py
|
31
|
1715
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test Twisted's doctest support.
"""
from twisted.trial import itrial, runner, unittest, reporter
from twisted.trial.test import mockdoctest
class TestRunners(unittest.SynchronousTestCase):
"""
Tests for Twisted's doctest support.
"""
def test_id(self):
"""
Check that the id() of the doctests' case object contains the FQPN of
the actual tests.
"""
loader = runner.TestLoader()
suite = loader.loadDoctests(mockdoctest)
idPrefix = 'twisted.trial.test.mockdoctest.Counter'
for test in suite._tests:
self.assertIn(idPrefix, itrial.ITestCase(test).id())
def test_basicTrialIntegration(self):
"""
L{loadDoctests} loads all of the doctests in the given module.
"""
loader = runner.TestLoader()
suite = loader.loadDoctests(mockdoctest)
self.assertEqual(7, suite.countTestCases())
def _testRun(self, suite):
"""
Run C{suite} and check the result.
"""
result = reporter.TestResult()
suite.run(result)
self.assertEqual(5, result.successes)
self.assertEqual(2, len(result.failures))
def test_expectedResults(self, count=1):
"""
Trial can correctly run doctests with its xUnit test APIs.
"""
suite = runner.TestLoader().loadDoctests(mockdoctest)
self._testRun(suite)
def test_repeatable(self):
"""
Doctests should be runnable repeatably.
"""
suite = runner.TestLoader().loadDoctests(mockdoctest)
self._testRun(suite)
self._testRun(suite)
|
bsd-3-clause
|
RaphaelCavalcante/pymtp
|
tools/tabs2spaces.py
|
2
|
2259
|
#!/usr/bin/python
#-------------------------------------------------------------------------------
# Name: tabs2spaces.py
# Purpose: Takes one filename as the first argument and converts each tab
# into the number of spaces specified with the second argument.
#
# Usage: tabs2spaces.py <filename> <number-of-spaces-per-tab>
#
# Author: Wayne Koorts
# Website: http://www.wkoorts.com
# Created: 21/09/2008
# Copyright: Copyright 2008 Wayne Koorts
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
if len(sys.argv) < 3:
print "Usage: tabs2spaces.py <filename> <number-of-spaces-per-tab>"
sys.exit(1)
tabs_filename = sys.argv[1]
num_spaces = sys.argv[2]
tabsfree_filename = "notabs_" + tabs_filename
spaces_str = ""
for x in range(0, int(num_spaces)):
spaces_str = spaces_str + " "
try:
tabs_file = open(tabs_filename, "r")
except IOError:
print "Error opening file \"" + tabs_filename + "\""
print "Chances are you accidentally misspelled the name"
sys.exit(1)
try:
tabsfree_file = open(tabsfree_filename, "w")
except IOError:
print "Can't create new file \"" + tabsfree_filename + "\""
sys.exit(1)
sys.stdout.write("Creating tabs-free file... ")
while tabs_file.read(1) != "":
tabs_file.seek(tabs_file.tell() - 1) # Loop condition already incremented file position
char = tabs_file.read(1)
if char == "\t":
tabsfree_file.write(spaces_str)
else:
tabsfree_file.write(char)
sys.stdout.write("done!\n")
tabs_file.close()
tabsfree_file.close()
print "Tabs removed successfully! The new tabs-free file is:"
print tabsfree_filename
|
gpl-3.0
|
mendhak/onetimepass
|
onetimepass/__init__.py
|
1
|
5850
|
"""
onetimepass module is designed to work for one-time passwords - HMAC-based and
time-based. It is compatible with Google Authenticator application and
applications based on it.
@version: 0.2.2
@author: Tomasz Jaskowski
@contact: http://github.com/tadeck
@license: MIT
>>> secret = b'MFRGGZDFMZTWQ2LK'
>>> get_hotp(secret, 1) == 765705
True
>>> get_hotp(secret, 1, as_string=True) == b'765705'
True
>>> valid_hotp(get_hotp(secret, 123), secret)
123
>>> valid_hotp(get_hotp(secret, 123), secret, last=123)
False
>>> valid_totp(get_totp(secret), secret)
True
>>> valid_totp(get_totp(secret)+1, secret)
False
>>> valid_hotp(get_totp(secret), secret)
False
>>> valid_totp(get_hotp(secret, 1), secret)
False
"""
import base64
import hashlib
import hmac
import six
import struct
import time
__author__ = 'Tomasz Jaskowski <[email protected]>'
__date__ = '12 July 2013'
__version_info__ = (0, 2, 2)
__version__ = '%s.%s.%s' % __version_info__
__license__ = 'MIT'
def _is_possible_token(token):
"""Determines if given value is acceptable as a token. Used when validating
tokens.
Currently allows only numeric tokens no longer than 6 chars.
:param token: token value to be checked
:type token: int or str
:return: True if can be a candidate for token, False otherwise
:rtype: bool
>>> _is_possible_token(123456)
True
>>> _is_possible_token(b'123456')
True
>>> _is_possible_token(b'abcdef')
False
>>> _is_possible_token(b'12345678')
False
"""
if not isinstance(token, bytes):
token = six.b(str(token))
return token.isdigit() and len(token) <= 6
def get_hotp(secret, intervals_no, as_string=False, casefold=True):
"""
Get HMAC-based one-time password on the basis of given secret and
interval number.
:param secret: the base32-encoded string acting as secret key
:type secret: str or unicode
:param intervals_no: interval number used for getting different tokens, it
is incremented with each use
:type intervals_no: int
:param as_string: True if result should be padded string, False otherwise
:type as_string: bool
:param casefold: True (default), if should accept also lowercase alphabet
:type casefold: bool
:return: generated HOTP token
:rtype: int or str
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=1)
765705
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2)
816065
>>> result = get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2, as_string=True)
>>> result == b'816065'
True
"""
if isinstance(secret, six.string_types):
# It is unicode, convert it to bytes
secret = secret.encode('utf-8')
try:
key = base64.b32decode(secret, casefold=casefold)
except TypeError:
raise TypeError('Incorrect secret')
msg = struct.pack('>Q', intervals_no)
hmac_digest = hmac.new(key, msg, hashlib.sha1).digest()
ob = hmac_digest[19] if six.PY3 else ord(hmac_digest[19])
o = ob & 15
token_base = struct.unpack('>I', hmac_digest[o:o + 4])[0] & 0x7fffffff
token = token_base % 1000000
if as_string:
# TODO: should as_string=True return unicode, not bytes?
return six.b('{:06d}'.format(token))
else:
return token
def get_totp(secret, as_string=False):
"""Get time-based one-time password on the basis of given secret and time.
:param secret: the base32-encoded string acting as secret key
:type secret: str
:param as_string: True if result should be padded string, False otherwise
:type as_string: bool
:return: generated TOTP token
:rtype: int or str
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', int(time.time())//30) == \
get_totp(b'MFRGGZDFMZTWQ2LK')
True
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', int(time.time())//30) == \
get_totp(b'MFRGGZDFMZTWQ2LK', as_string=True)
False
"""
interv_no = int(time.time()) // 30
return get_hotp(secret, intervals_no=interv_no, as_string=as_string)
def valid_hotp(token, secret, last=1, trials=1000):
"""Check if given token is valid for given secret. Return interval number
that was successful, or False if not found.
:param token: token being checked
:type token: int or str
:param secret: secret for which token is checked
:type secret: str
:param last: last used interval (start checking with next one)
:type last: int
:param trials: number of intervals to check after 'last'
:type trials: int
:return: interval number, or False if check unsuccessful
:rtype: int or bool
>>> secret = b'MFRGGZDFMZTWQ2LK'
>>> valid_hotp(713385, secret, last=1, trials=5)
4
>>> valid_hotp(865438, secret, last=1, trials=5)
False
>>> valid_hotp(713385, secret, last=4, trials=5)
False
"""
if not _is_possible_token(token):
return False
for i in six.moves.xrange(last + 1, last + trials + 1):
if get_hotp(secret=secret, intervals_no=i) == int(token):
return i
return False
def valid_totp(token, secret):
"""Check if given token is valid time-based one-time password for given
secret.
:param token: token which is being checked
:type token: int or str
:param secret: secret for which the token is being checked
:type secret: str
:return: True, if is valid token, False otherwise
:rtype: bool
>>> secret = b'MFRGGZDFMZTWQ2LK'
>>> token = get_totp(secret)
>>> valid_totp(token, secret)
True
>>> valid_totp(token+1, secret)
False
>>> token = get_totp(secret, as_string=True)
>>> valid_totp(token, secret)
True
>>> valid_totp(token + b'1', secret)
False
"""
return _is_possible_token(token) and int(token) == get_totp(secret)
__all__ = [
'get_hotp',
'get_totp',
'valid_hotp',
'valid_totp'
]
|
mit
|
TeamEOS/external_chromium_org
|
third_party/markdown/extensions/nl2br.py
|
109
|
2625
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
NL2BR Extension
===============
A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does.
Usage:
>>> import markdown
>>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
<p>line 1<br />
line 2</p>
Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SubstituteTagPattern
BR_RE = r'\n'
class Nl2BrExtension(Extension):
def extendMarkdown(self, md, md_globals):
br_tag = SubstituteTagPattern(BR_RE, 'br')
md.inlinePatterns.add('nl', br_tag, '_end')
def makeExtension(configs=None):
return Nl2BrExtension(configs)
|
bsd-3-clause
|
braams/shtoom
|
shtoom/ui/wxui/main.py
|
1
|
9579
|
from wxPython.wx import *
from wxshtoomframe import ShtoomMainFrame
from wxlogframe import LogFrame
from shtoom.ui.base import ShtoomBaseUI
from twisted.python import log
from twisted.internet import reactor, defer
from prefs import PreferencesDialog
import thread
import os
# Implements the ShtoomMainFrame class as generated by
# wxglade. Call me paranoid but I'm not sticking this code
# in a generated file - no matter how bug free and fantastic
# wxglade is at generating code.
# TODO: Hookup the DTMF code for the number buttons
class ShtoomMainFrameImpl(ShtoomMainFrame, ShtoomBaseUI):
# Control IDs
MENU_PREFS = 101
MENU_EXIT = 102
MENU_HELP_CONTENTS = 103
MENU_REGISTER = 104
MENU_ERRORLOG = 105
COMBO_ADDRESS = 150
BUTT_ADVANCED = 201
BUTT_CALL = 202
BUTT_LOOKUP = 203
BUTT_0 = 210
BUTT_1 = 211
BUTT_2 = 212
BUTT_3 = 213
BUTT_4 = 214
BUTT_5 = 215
BUTT_6 = 216
BUTT_7 = 217
BUTT_8 = 218
BUTT_9 = 219
BUTT_STAR = 220
BUTT_HASH = 221
def __init__(self, *args, **kwds):
ShtoomMainFrame.__init__(self, *args, **kwds)
EVT_MENU(self, self.MENU_EXIT, self.DoExit)
EVT_MENU(self, self.MENU_PREFS, self.DoPreferences)
EVT_MENU(self, self.MENU_REGISTER, self.DoRegister)
EVT_MENU(self, self.MENU_ERRORLOG, self.DoErrorLog)
EVT_BUTTON(self, self.BUTT_ADVANCED, self.OnAdvanced)
EVT_BUTTON(self, self.BUTT_CALL, self.OnCall)
EVT_TEXT_ENTER(self, self.COMBO_ADDRESS, self.PlaceCall)
EVT_CLOSE(self, self.DoClose)
# Advanced mode - whether to display the dtmf buttons or not
self.advanced_mode = True
# Call mode - whether the call button places a call or hangs up
self.call_mode = True
# Setup combobox values from previous address history
self.combo_address.Clear()
self.address_history = []
self.loadHistory()
if self.address_history:
self.combo_address.Append("")
[self.combo_address.Append(v) for v in self.address_history]
sizex, sizey = self.GetSize()
self.minx = sizex
# Initialise the status bar
self.SetStatusText(_('Not connected'))
# Startup without the "advanced" functionality showing
# This also restricts the resizing of the window
self.OnAdvanced(None)
# Hookup the error log
# Calculate initial pos for the message log window
posx, posy = self.GetPosition()
self.errorlog = LogFrameImpl(self, -1, _("Debugging"),
pos=(posx+sizex+5,posy))
wxLog_SetActiveTarget(wxLogTextCtrl(self.errorlog.text_errorlog))
self.logger = Logger()
def statusMessage(self, message):
self.SetStatusText(message)
def debugMessage(self, message):
wxLogMessage(message)
def errorMessage(self, message):
wxLogMessage("%s: %s"%(_('ERROR'),message))
def updateCallButton(self, do_call):
if do_call:
self.call_mode = True
self.button_call.SetLabel(_("Call"))
else:
self.call_mode = False
self.button_call.SetLabel(_("Hang Up"))
def OnCall(self, event):
if self.call_mode:
self.PlaceCall(event)
else:
self.HangupCall(event)
def getCurrentAddress(self):
return self.combo_address.GetValue()
def PlaceCall(self, event):
sipURL = self.getCurrentAddress()
if not sipURL.startswith('sip'):
dlg = wxMessageDialog(self,
'%s %s'%(sipURL, _('is an invalid address. The address must begin with "sip".')),
_("Address error"), wxOK)
dlg.ShowModal()
return
# have hang up and call buttons toggle
self.updateCallButton(do_call=False)
self.app.placeCall(sipURL)
def callStarted(self, cookie):
self.cookie = cookie
self.updateCallButton(do_call=False)
def callConnected(self, cookie):
self.updateCallButton(do_call=False)
self.SetStatusText(_("Call Connected"))
# Save the address we connected to. We'll use this to
# pre-populate the address combo on startup
address = self.getCurrentAddress()
if address not in self.address_history:
self.address_history.append(address)
def callDisconnected(self, cookie, message=""):
status = _("Call Disconnected")
if message:
status = "%s: %r"%(status, message)
self.SetStatusText(status)
self.updateCallButton(do_call=True)
self.cookie = None
def callFailed(self, cookie, message=""):
status = _("Call Failed")
if message:
status = "%s: %r"%(status, message)
self.SetStatusText(status)
self.updateCallButton(do_call=True)
self.cookie = None
def HangupCall(self, event):
self.app.dropCall(self.cookie)
self.updateCallButton(do_call=True)
self.SetStatusText(_('Not connected'))
self.cookie = False
def incomingCall(self, description, cookie):
from shtoom.exceptions import CallRejected
dlg = wxMessageDialog(self, 'Incoming Call: %s\nAnswer?'%description,
"Shtoom Call", wxYES_NO|wxICON_QUESTION)
accept = dlg.ShowModal()
if accept == wxID_YES:
self.cookie = cookie
self.updateCallButton(do_call=False)
self.SetStatusText('Connected to %s'%description)
self.app.answerIncomingCall(cookie)
else:
self.app.answerIncomingCall(CallRejected('no thanks', cookie))
def DoErrorLog(self, event):
self.errorlog.Show(True)
def DoRegister(self, event):
dlg = wxMessageDialog(self,
'Re-register by entering new details in the identity preferences.\nContinue registering?',
"Register", wxYES_NO|wxICON_QUESTION)
accept = dlg.ShowModal()
if accept == wxID_YES:
self.app.register()
def DoPreferences(self, event):
dlg = PreferencesDialog(main=self, opts=self.app.getOptions())
val = dlg.ShowModal()
if val == wxID_OK:
dlg.savePreferences(self.app)
def getHistoryFilename(self):
try:
saveDir = os.path.expanduser('~%s'%os.getlogin())
except Exception:
saveDir = os.getcwd()
return os.path.join(saveDir, ".shtoom_history")
def saveHistory(self):
if self.address_history:
hfile = self.getHistoryFilename()
if not os.access(hfile, os.R_OK|os.W_OK):
return
hfp = open(hfile, 'w')
[hfp.write('%s\n'%h) for h in self.address_history]
hfp.close()
def loadHistory(self):
hfile = self.getHistoryFilename()
if not os.access(hfile, os.R_OK|os.W_OK):
return
hfp = open(hfile, 'r')
while 1:
l = hfp.readline()
if not l: break
l = l.strip()
self.address_history.append(l)
hfp.close()
def DoClose(self, event):
# Write out the current address history
self.saveHistory()
# TODO: Move this into the proxy app
reactor.callFromThread(reactor.stop)
self.logger.disable()
self.Destroy()
def DoExit(self, event):
self.Close(True)
def UpdateHeight(self, newheight):
curwidth, curheight = self.GetSize()
self.SetSizeHints(self.minx, newheight, self.minx*2, newheight)
self.SetSize((curwidth, newheight))
self.Show(1)
def debugSize(self):
print "size=",self.GetSize()
def OnAdvanced(self, event):
# Hide the extended interface. Basically the last slot in the
# frames sizer. Modifies the advanced button label. Fixes up window
# sizes
sizer = self.GetSizer()
awidth,aheight = self.advanced.GetSize()
fwidth,fheight = self.GetSize()
if self.advanced_mode:
self.advanced_mode = False
self.advanced.Show(0)
sizer.Show(self.advanced, 0)
self.advheight = aheight
newheight = fheight-aheight
self.button_advanced.SetLabel('+')
self.button_advanced.SetToolTipString("Display extra controls")
else:
self.advanced_mode = True
self.advanced.Show(1)
sizer.Show(self.advanced, 1)
newheight = fheight+self.advheight
self.button_advanced.SetLabel('-')
self.button_advanced.SetToolTipString("Hide extra controls")
#self.Layout()
#sizer.Layout()
#sizer.Fit(self)
#sizer.SetSizeHints(self)
self.UpdateHeight(newheight)
def getLogger(self):
return self.logger
class LogFrameImpl(LogFrame):
BUTT_CLEAR = 101
BUTT_CLOSE = 102
def __init__(self, *args, **kwargs):
LogFrame.__init__(self, *args, **kwargs)
EVT_BUTTON(self, self.BUTT_CLEAR, self.OnClear)
EVT_BUTTON(self, self.BUTT_CLOSE, self.OnClose)
EVT_CLOSE(self, self.OnClose)
def OnClear(self, event):
self.text_errorlog.Clear()
def OnClose(self, event):
self.Hide()
class Logger:
def __init__(self):
# Disable logging during shutdown
self.enabled = 1
def disable(self):
self.enabled = 0
def flush(self):
pass
def write(self, text):
if self.enabled:
wxLogMessage(text)
|
lgpl-2.1
|
cuilishen/cuilishenMissionPlanner
|
Lib/site-packages/scipy/stats/setup.py
|
51
|
1085
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('stats', parent_package, top_path)
config.add_data_dir('tests')
config.add_library('statlib',
sources=[join('statlib', '*.f')])
# add statlib module
config.add_extension('statlib',
sources=['statlib.pyf'],
f2py_options=['--no-wrap-functions'],
libraries=['statlib'],
)
# add vonmises_cython module
config.add_extension('vonmises_cython',
sources=['vonmises_cython.c'], # FIXME: use cython source
)
# add futil module
config.add_extension('futil',
sources=['futil.f'],
)
# add mvn module
config.add_extension('mvn',
sources=['mvn.pyf','mvndst.f'],
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
gpl-3.0
|
tbadgu/Barcamp-Bangalore-Android-App
|
gcm_flask/werkzeug/debug/console.py
|
74
|
5465
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
_local = Local()
class HTMLStringO(object):
"""A StringO version that HTML escapes on write."""
def __init__(self):
self._buffer = []
def isatty(self):
return False
def close(self):
pass
def flush(self):
pass
def seek(self, n, mode=0):
pass
def readline(self):
if len(self._buffer) == 0:
return ''
ret = self._buffer[0]
del self._buffer[0]
return ret
def reset(self):
val = ''.join(self._buffer)
del self._buffer[:]
return val
def _write(self, x):
if isinstance(x, str):
x = x.decode('utf-8', 'replace')
self._buffer.append(x)
def write(self, x):
self._write(escape(x))
def writelines(self, x):
self._write(escape(''.join(x)))
class ThreadedStream(object):
"""Thread-local wrapper for sys.stdout for the interactive console."""
def push():
if not isinstance(sys.stdout, ThreadedStream):
sys.stdout = ThreadedStream()
_local.stream = HTMLStringO()
push = staticmethod(push)
def fetch():
try:
stream = _local.stream
except AttributeError:
return ''
return stream.reset()
fetch = staticmethod(fetch)
def displayhook(obj):
try:
stream = _local.stream
except AttributeError:
return _displayhook(obj)
# stream._write bypasses escaping as debug_repr is
# already generating HTML for us.
if obj is not None:
stream._write(debug_repr(obj))
displayhook = staticmethod(displayhook)
def __setattr__(self, name, value):
raise AttributeError('read only attribute %s' % name)
def __dir__(self):
return dir(sys.__stdout__)
def __getattribute__(self, name):
if name == '__members__':
return dir(sys.__stdout__)
try:
stream = _local.stream
except AttributeError:
stream = sys.__stdout__
return getattr(stream, name)
def __repr__(self):
return repr(sys.__stdout__)
# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
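# Illustrative sketch: after _wrap_compiler(console), every code object
# compiled from console input is registered with the console's loader,
# e.g. console.compile('1 + 1\n', '<debugger>', 'single') also records the
# source so tracebacks can display it via __loader__.get_source_by_code.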
class _InteractiveConsole(code.InteractiveInterpreter):
def __init__(self, globals, locals):
code.InteractiveInterpreter.__init__(self, locals)
self.globals = dict(globals)
self.globals['dump'] = dump
self.globals['help'] = helper
self.globals['__loader__'] = self.loader = _ConsoleLoader()
self.more = False
self.buffer = []
_wrap_compiler(self)
def runsource(self, source):
source = source.rstrip() + '\n'
ThreadedStream.push()
prompt = self.more and '... ' or '>>> '
try:
source_to_eval = ''.join(self.buffer + [source])
if code.InteractiveInterpreter.runsource(self,
source_to_eval, '<debugger>', 'single'):
self.more = True
self.buffer.append(source)
else:
self.more = False
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
return prompt + source + output
def runcode(self, code):
try:
exec code in self.globals, self.locals
except Exception:
self.showtraceback()
def showtraceback(self):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=1)
sys.stdout._write(tb.render_summary())
def showsyntaxerror(self, filename=None):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=4)
sys.stdout._write(tb.render_summary())
def write(self, data):
sys.stdout.write(data)
class Console(object):
"""An interactive console."""
def __init__(self, globals=None, locals=None):
if locals is None:
locals = {}
if globals is None:
globals = {}
self._ipy = _InteractiveConsole(globals, locals)
def eval(self, code):
old_sys_stdout = sys.stdout
try:
return self._ipy.runsource(code)
finally:
sys.stdout = old_sys_stdout
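# Minimal usage sketch, assuming the surrounding werkzeug machinery:
#   console = Console(globals={'answer': 42})
#   console.eval('answer * 2')
# returns roughly '>>> answer * 2\n' followed by an HTML repr of 84,
# since all output is captured by the thread-local HTMLStringO stream.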
|
apache-2.0
|
dcroc16/skunk_works
|
google_appengine/google/appengine/api/search/stub/tokens.py
|
11
|
3103
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Token classes for the Full Text Search API stub."""
from google.appengine.api.search import search_util
class Token(object):
"""Represents a token, usually a word, extracted from some document field."""
def __init__(self, chars=None, position=None, field_name=None):
"""Initializer.
Args:
chars: The string representation of the token.
position: The position of the token in the sequence from the document
field.
field_name: The name of the field the token occurred in.
Raises:
TypeError: If an unknown argument is passed.
"""
if isinstance(chars, basestring) and not isinstance(chars, unicode):
chars = unicode(chars, 'utf-8')
self._chars = chars
self._position = position
self._field_name = field_name
@property
def chars(self):
"""Returns a list of fields of the document."""
value = self._chars
if not isinstance(value, basestring):
value = str(self._chars)
if self._field_name:
return self._field_name + ':' + value
return value
@property
def position(self):
"""Returns a list of fields of the document."""
return self._position
def RestrictField(self, field_name):
"""Creates a copy of this Token and sets field_name."""
return Token(chars=self.chars, position=self.position,
field_name=field_name)
def __repr__(self):
return search_util.Repr(
self, [('chars', self.chars), ('position', self.position)])
def __eq__(self, other):
return (isinstance(other, Token) and
self.chars.lower() == other.chars.lower())
def __hash__(self):
# Hash the lower-cased chars so hashing stays consistent with __eq__,
# which compares tokens case-insensitively.
return hash(self.chars.lower())
class Quote(Token):
"""Represents a single or double quote in a document field or query."""
def __init__(self, **kwargs):
Token.__init__(self, **kwargs)
class Number(Token):
"""Represents a number in a document field or query."""
def __init__(self, **kwargs):
Token.__init__(self, **kwargs)
class GeoPoint(Token):
"""Represents a geo point in a document field or query."""
def __init__(self, **kwargs):
self._latitude = kwargs.pop('latitude')
self._longitude = kwargs.pop('longitude')
Token.__init__(self, **kwargs)
@property
def latitude(self):
"""Returns the angle between equatorial plan and line thru the geo point."""
return self._latitude
@property
def longitude(self):
"""Returns the angle from a reference meridian to another meridian."""
return self._longitude
|
mit
|
mcanthony/moviepy
|
setup.py
|
11
|
1067
|
#!/usr/bin/env python
# This will try to import setuptools. If it is not available, it will reach for the
# embedded ez_setup (or the ez_setup package). If neither is found, it fails with a message.
try:
from setuptools import setup
except ImportError:
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
raise ImportError("MoviePy could not be installed, probably because"
" neither setuptools nor ez_setup are installed on this computer."
"\nInstall ez_setup ([sudo] pip install ez_setup) and try again.")
from setuptools import setup, find_packages
exec(open('moviepy/version.py').read()) # loads __version__
setup(name='moviepy',
version=__version__,
author='Zulko 2015',
description='Video editing with Python',
long_description=open('README.rst').read(),
url='http://zulko.github.io/moviepy/',
license='MIT License',
keywords="video editing audio compositing ffmpeg",
packages= find_packages(exclude='docs'),
install_requires= ['numpy', 'decorator', 'imageio', 'tqdm'])
|
mit
|
technologiescollege/s2a_fr
|
s2a/Python/Lib/doctest.py
|
10
|
104460
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
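# Sketch: because register_optionflag hands out distinct powers of two, flags
# compose with | and are queried with & (the _demo_* helper below is
# illustrative, not part of the original module):
def _demo_optionflags():
    flags = ELLIPSIS | NORMALIZE_WHITESPACE
    assert flags & ELLIPSIS and flags & NORMALIZE_WHITESPACE
    assert not flags & SKIP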
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
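# Illustrative check of _extract_future_flags (demo helper, not part of the
# original module): a namespace that imported `division` yields exactly that
# feature's compiler flag.
def _demo_extract_future_flags():
    globs = {'division': __future__.division}
    assert _extract_future_flags(globs) == __future__.division.compiler_flag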
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename) as f:
return f.read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
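# Illustrative behavior of _indent (demo helper, not part of the original
# module): blank lines are left untouched, non-blank lines gain the prefix.
def _demo_indent():
    assert _indent('a\nb\n') == '    a\n    b\n'
    assert _indent('a\n\nb\n', indent=2) == '  a\n\n  b\n'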
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
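# Illustrative use of _exception_traceback (demo helper, not part of the
# original module): it renders the same text the interpreter would print.
def _demo_exception_traceback():
    try:
        raise ValueError('boom')
    except ValueError:
        msg = _exception_traceback(sys.exc_info())
    assert msg.startswith('Traceback')
    assert msg.rstrip().endswith('ValueError: boom')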
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
if not self.buf:
# Reset it to an empty string, to make sure it's not unicode.
self.buf = ''
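# Illustrative behavior of _SpoofOut (demo helper, not part of the original
# module): captured output always gains a trailing newline, so expected doctest
# output can be compared line by line.
def _demo_spoofout():
    out = _SpoofOut()
    out.write('partial')
    assert out.getvalue() == 'partial\n'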
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
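# Illustrative cases for _ellipsis_match (demo helper, not part of the
# original module), including the subtle overlap case noted above.
def _demo_ellipsis_match():
    assert _ellipsis_match('a...z', 'abcz')
    assert _ellipsis_match('...', 'anything at all')
    assert not _ellipsis_match('aa...aa', 'aaa')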
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
# still use input() to get user input
self.use_rawinput = 1
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.source == other.source and \
self.want == other.want and \
self.lineno == other.lineno and \
self.indent == other.indent and \
self.options == other.options and \
self.exc_msg == other.exc_msg
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.source, self.want, self.lineno, self.indent,
self.exc_msg))
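# Constructing an Example directly (demo helper, not part of the original
# module): the constructor normalizes trailing newlines as documented above.
def _demo_example():
    ex = Example('1 + 1', '2')
    assert ex.source == '1 + 1\n' and ex.want == '2\n'
    assert ex.exc_msg is None and ex.options == {}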
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.examples == other.examples and \
self.docstring == other.docstring and \
self.globs == other.globs and \
self.name == other.name and \
self.filename == other.filename and \
self.lineno == other.lineno
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.docstring, self.name, self.filename, self.lineno))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
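# A DocTest built by hand (demo helper, not part of the original module):
# repr() summarizes the name, origin, and example count.
def _demo_doctest_repr():
    test = DocTest([], {}, 'empty', None, 0, '')
    assert repr(test) == '<DocTest empty from None:0 (no examples)>'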
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.+$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as the opening triple-quote,
so the first interesting line is then called "line 1".
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
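# Parsing a string into Examples (demo helper, not part of the original
# module): prompts and indentation are stripped, newlines normalized.
def _demo_parser():
    examples = DocTestParser().get_examples('>>> 1 + 1\n2\n')
    assert len(examples) == 1
    assert examples[0].source == '1 + 1\n'
    assert examples[0].want == '2\n'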
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
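# Extracting tests from a live object (demo helper, not part of the original
# module; assumes this module's source file is available so line numbers can
# be resolved):
def _demo_finder():
    def square(x):
        """
        >>> square(3)
        9
        """
        return x * x
    tests = DocTestFinder().find(square)
    assert len(tests) == 1 and len(tests[0].examples) == 1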
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
if m1 and m2 and check(m1.group(1), m2.group(1),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source
if isinstance(source, unicode):
source = source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using the
`OutputChecker.check_output` method, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
sys.displayhook = save_displayhook
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
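# Running a parsed test end to end (demo helper, not part of the original
# module): a passing example yields TestResults(failed=0, attempted=1).
def _demo_runner():
    test = DocTestParser().get_doctest('>>> 2 + 2\n4\n', {}, 'demo', None, 0)
    results = DocTestRunner(verbose=False).run(test)
    assert results == TestResults(failed=0, attempted=1)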
class OutputChecker:
"""
A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
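# OutputChecker in isolation (demo helper, not part of the original module):
# option flags relax the exact-match rule.
def _demo_checker():
    checker = OutputChecker()
    assert checker.check_output('a b\n', 'a    b\n', NORMALIZE_WHITESPACE)
    assert checker.check_output('abc...xyz\n', 'abcdefxyz\n', ELLIPSIS)
    assert not checker.check_output('1\n', '2\n', 0)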
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
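# DebugRunner raising on the first failure (demo helper, not part of the
# original module): the exception carries the test, example, and actual output.
def _demo_debug_runner():
    test = DocTestParser().get_doctest('>>> 1\n2\n', {}, 'demo', None, 0)
    try:
        DebugRunner(verbose=False).run(test)
        assert False, 'expected DocTestFailure'
    except DocTestFailure, failure:
        assert failure.got == '1\n'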
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
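# A minimal, hedged usage sketch for testmod (the exit-status handling is
# illustrative, not part of this module):
#
#     import doctest, sys
#     failures, attempted = doctest.testmod(verbose=False)
#     sys.exit(1 if failures else 0)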
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
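# A minimal, hedged usage sketch for testfile ("example.txt" is a
# hypothetical doctest file shipped next to the calling module):
#
#     import doctest
#     failures, attempted = doctest.testfile('example.txt',
#                                            optionflags=doctest.ELLIPSIS)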
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
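# A minimal, hedged usage sketch ("square" is an illustrative function whose
# docstring carries a single doctest example):
#
#     def square(n):
#         """
#         >>> square(3)
#         9
#         """
#         return n * n
#
#     doctest.run_docstring_examples(square, globals(), verbose=True)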
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._dt_test == other._dt_test and \
self._dt_optionflags == other._dt_optionflags and \
self._dt_setUp == other._dt_setUp and \
self._dt_tearDown == other._dt_tearDown and \
self._dt_checker == other._dt_checker
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
self._dt_checker))
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
def __init__(self, module):
self.module = module
DocTestCase.__init__(self, None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
def test_skip(self):
pass
def shortDescription(self):
return "Skipping tests from %s" % self.module.__name__
__str__ = shortDescription
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >= 2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
# It is probably a bug that this exception is not also raised if the
# number of doctest examples in tests is zero (i.e. if no doctest
# examples were found). However, we should probably not be raising
# an exception at all here, though it is too late to make this change
# for a maintenance release. See also issue #14649.
raise ValueError(module, "has no docstrings")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
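# A minimal, hedged usage sketch ("mypackage.mymodule" is illustrative; any
# importable module containing doctests works):
#
#     import doctest, unittest
#     suite = doctest.DocTestSuite('mypackage.mymodule',
#                                  optionflags=doctest.NORMALIZE_WHITESPACE)
#     unittest.TextTestRunner(verbosity=2).run(suite)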
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
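# A minimal, hedged usage sketch (the text file paths are illustrative and,
# by default, module-relative to the caller):
#
#     import doctest
#     suite = doctest.DocFileSuite('docs/usage.txt', 'docs/api.txt',
#                                  optionflags=doctest.ELLIPSIS)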
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
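# A minimal, hedged usage sketch ("mymodule" and "mymodule.f" are
# illustrative; the second argument is the dotted test name within the
# module, as found by DocTestFinder):
#
#     import doctest
#     doctest.debug('mymodule', 'mymodule.f', pm=True)  # post-mortem pdb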
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
if __name__ == "__main__":
sys.exit(_test())
|
gpl-3.0
|
2ndQuadrant/ansible
|
lib/ansible/plugins/callback/profile_tasks.py
|
39
|
6614
|
# (C) 2016, Joel, https://github.com/jjshoe
# (C) 2015, Tom Paine, <[email protected]>
# (C) 2014, Jharrod LaFon, @JharrodLaFon
# (C) 2012-2013, Michael DeHaan, <[email protected]>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: profile_tasks
type: aggregate
short_description: adds time information to tasks
version_added: "2.0"
description:
- Ansible callback plugin for timing individual tasks and overall execution time.
- "Mashup of 2 excellent original works: https://github.com/jlafon/ansible-profile,
https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old"
- "Format: C(<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>)"
- It also lists the top/bottom time consuming tasks in the summary (configurable)
- Before 2.4 only the environment variables were available for configuration.
requirements:
- whitelisting in configuration - see examples section below for details.
options:
output_limit:
description: Number of tasks to display in the summary
default: 20
env:
- name: PROFILE_TASKS_TASK_OUTPUT_LIMIT
ini:
- section: callback_profile_tasks
key: task_output_limit
sort_order:
description: Adjust the sorting output of summary tasks
choices: ['descending', 'ascending', 'none']
default: 'descending'
env:
- name: PROFILE_TASKS_SORT_ORDER
ini:
- section: callback_profile_tasks
key: sort_order
'''
EXAMPLES = '''
example: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = profile_tasks
sample output: >
#
# TASK: [ensure messaging security group exists] ********************************
# Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 *********
# ok: [localhost]
#
# TASK: [ensure db security group exists] ***************************************
# Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 *********
# changed: [localhost]
#
'''
import collections
import time
from ansible.module_utils.six.moves import reduce
from ansible.plugins.callback import CallbackBase
# define start time
t0 = tn = time.time()
def secondsToStr(t):
# http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
def rediv(ll, b):
return list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp(self):
if self.current is not None:
self.stats[self.current]['time'] = time.time() - self.stats[self.current]['time']
def tasktime():
global tn
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
tn = time.time()
return filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
"""
This callback module provides per-task timing, ongoing playbook elapsed time
and ordered list of top 20 longest running tasks at end.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'profile_tasks'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.stats = collections.OrderedDict()
self.current = None
self.sort_order = None
self.task_output_limit = None
super(CallbackModule, self).__init__()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.sort_order = self.get_option('sort_order')
if self.sort_order is not None:
if self.sort_order == 'ascending':
self.sort_order = False
elif self.sort_order == 'descending':
self.sort_order = True
elif self.sort_order == 'none':
self.sort_order = None
self.task_output_limit = self.get_option('output_limit')
if self.task_output_limit is not None:
if self.task_output_limit == 'all':
self.task_output_limit = None
else:
self.task_output_limit = int(self.task_output_limit)
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
timestamp(self)
# Record the start time of the current task
self.current = task._uuid
self.stats[self.current] = {'time': time.time(), 'name': task.get_name()}
if self._display.verbosity >= 2:
self.stats[self.current]['path'] = task.get_path()
def v2_playbook_on_task_start(self, task, is_conditional):
self._record_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display.display(filled("", fchar="="))
timestamp(self)
self.current = None
results = self.stats.items()
# Sort the tasks by the specified sort
if self.sort_order is not None:
results = sorted(
self.stats.items(),
key=lambda x: x[1]['time'],
reverse=self.sort_order,
)
# Display the number of tasks specified or the default of 20
results = results[:self.task_output_limit]
# Print the timings
for uuid, result in results:
msg = u"{0:-<{2}}{1:->9}".format(result['name'] + u' ', u' {0:.02f}s'.format(result['time']), self._display.columns - 9)
if 'path' in result:
msg += u"\n{0:-<{1}}".format(result['path'] + u' ', self._display.columns)
self._display.display(msg)
|
gpl-3.0
|
swtp1v07/Savu
|
savu/plugins/base_recon.py
|
1
|
3442
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: base_recon
:platform: Unix
:synopsis: A simple implementation a reconstruction routine for testing
purposes
.. moduleauthor:: Mark Basham <[email protected]>
"""
import logging
from savu.data.structures import ProjectionData, VolumeData
from savu.plugins.plugin import Plugin
from savu.core.utils import logmethod
import numpy as np
class BaseRecon(Plugin):
"""
    A Plugin to apply a simple reconstruction with no dependencies
    :param center_of_rotation: Center of rotation to use for the reconstruction. Default: 86.
"""
count = 0
def __init__(self, name='BaseRecon'):
super(BaseRecon, self).__init__(name)
def reconstruct(self, sinogram, centre_of_rotation, angles, shape, center):
"""
Reconstruct a single sinogram with the provided center of rotation
"""
logging.error("reconstruct needs to be implemented")
raise NotImplementedError("reconstruct " +
"needs to be implemented")
@logmethod
def process(self, data, output, processes, process):
"""
"""
        centre_of_rotation = data.center_of_rotation
        if centre_of_rotation is None:
            # Fall back to the user-supplied parameter, one value per sinogram
            centre_of_rotation = (np.ones(data.get_number_of_sinograms()) *
                                  self.parameters['center_of_rotation'])
        else:
            centre_of_rotation = centre_of_rotation[:]
sinogram_frames = np.arange(data.get_number_of_sinograms())
frames = np.array_split(sinogram_frames, len(processes))[process]
centre_of_rotations =\
np.array_split(centre_of_rotation, len(processes))[process]
angles = data.rotation_angle.data[:]
for i in range(len(frames)):
frame_centre_of_rotation = centre_of_rotations[i]
sinogram = data.data[:, frames[i], :]
reconstruction = \
self.reconstruct(sinogram, frame_centre_of_rotation, angles,
(output.data.shape[0], output.data.shape[2]),
(output.data.shape[0]/2,
output.data.shape[2]/2))
output.data[:, frames[i], :] = reconstruction
            self.count += 1
            print self.count
def required_data_type(self):
"""
The input for this plugin is ProjectionData
:returns: ProjectionData
"""
return ProjectionData
def output_data_type(self):
"""
The output of this plugin is VolumeData
:returns: VolumeData
"""
return VolumeData
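# A hedged sketch of a concrete subclass (the reconstruction maths is elided;
# "SimpleRecon" and the zero-filled output are placeholders only):
#
#     class SimpleRecon(BaseRecon):
#         def __init__(self):
#             super(SimpleRecon, self).__init__('SimpleRecon')
#
#         def reconstruct(self, sinogram, centre_of_rotation, angles,
#                         shape, center):
#             return np.zeros(shape)  # replace with a real back-projection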
|
apache-2.0
|
Toshakins/wagtail
|
wagtail/wagtailimages/fields.py
|
9
|
4197
|
from __future__ import absolute_import, unicode_literals
import os
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fields import ImageField
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from PIL import Image
ALLOWED_EXTENSIONS = ['gif', 'jpg', 'jpeg', 'png']
SUPPORTED_FORMATS_TEXT = _("GIF, JPEG, PNG")
class WagtailImageField(ImageField):
def __init__(self, *args, **kwargs):
super(WagtailImageField, self).__init__(*args, **kwargs)
# Get max upload size from settings
self.max_upload_size = getattr(settings, 'WAGTAILIMAGES_MAX_UPLOAD_SIZE', 10 * 1024 * 1024)
max_upload_size_text = filesizeformat(self.max_upload_size)
# Help text
if self.max_upload_size is not None:
self.help_text = _(
"Supported formats: %(supported_formats)s. Maximum filesize: %(max_upload_size)s."
) % {
'supported_formats': SUPPORTED_FORMATS_TEXT,
'max_upload_size': max_upload_size_text,
}
else:
self.help_text = _(
"Supported formats: %(supported_formats)s."
) % {
'supported_formats': SUPPORTED_FORMATS_TEXT,
}
# Error messages
self.error_messages['invalid_image'] = _(
"Not a supported image format. Supported formats: %s."
) % SUPPORTED_FORMATS_TEXT
self.error_messages['invalid_image_known_format'] = _(
"Not a valid %s image."
)
self.error_messages['file_too_large'] = _(
"This file is too big (%%s). Maximum filesize %s."
) % max_upload_size_text
self.error_messages['file_too_large_unknown_size'] = _(
"This file is too big. Maximum filesize %s."
) % max_upload_size_text
def check_image_file_format(self, f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension not in ALLOWED_EXTENSIONS:
raise ValidationError(self.error_messages['invalid_image'], code='invalid_image')
if hasattr(f, 'image'):
# Django 1.8 annotates the file object with the PIL image
image = f.image
elif not f.closed:
# Open image file
file_position = f.tell()
f.seek(0)
try:
image = Image.open(f)
except IOError:
# Uploaded file is not even an image file (or corrupted)
raise ValidationError(self.error_messages['invalid_image_known_format'],
code='invalid_image_known_format')
f.seek(file_position)
else:
# Couldn't get the PIL image, skip checking the internal file format
return
image_format = extension.upper()
if image_format == 'JPG':
image_format = 'JPEG'
internal_image_format = image.format.upper()
if internal_image_format == 'MPO':
internal_image_format = 'JPEG'
# Check that the internal format matches the extension
# It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out
if internal_image_format != image_format:
raise ValidationError(self.error_messages['invalid_image_known_format'] % (
image_format,
), code='invalid_image_known_format')
def check_image_file_size(self, f):
# Upload size checking can be disabled by setting max upload size to None
if self.max_upload_size is None:
return
# Check the filesize
if f.size > self.max_upload_size:
raise ValidationError(self.error_messages['file_too_large'] % (
filesizeformat(f.size),
), code='file_too_large')
def to_python(self, data):
f = super(WagtailImageField, self).to_python(data)
if f is not None:
self.check_image_file_size(f)
self.check_image_file_format(f)
return f
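# A minimal, hedged usage sketch in a plain Django form (the form and field
# names are illustrative):
#
#     from django import forms
#
#     class ImageUploadForm(forms.Form):
#         image = WagtailImageField()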
|
bsd-3-clause
|
akumar21NCSU/servo
|
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py
|
636
|
71218
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only this status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
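# Hedged encoding examples, computed from the branches above (big-endian):
#   _encode_channel_id(5)      -> '\x05'          (ids <  2**7,  1 byte)
#   _encode_channel_id(300)    -> '\x81\x2c'      (ids <  2**14, 2 bytes)
#   _encode_channel_id(100000) -> '\xc1\x86\xa0'  (ids <  2**21, 3 bytes)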
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
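# A hedged, illustrative example of the parse above:
#   _parse_request_text('GET /chat HTTP/1.1\r\nHost: example.com\r\n\r\n')
#   -> ('GET', '/chat', 'HTTP/1.1', <Message with Host: example.com>)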
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
return number
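    # Hedged decoding examples (the field mirrors the WebSocket payload
    # length encoding, with the MSB of the first byte required to be unset):
    #   '\x05'                      -> 5    (values 0-125 inline)
    #   '\x7e\x01\x00'              -> 256  (126 prefix + 2-byte length)
    #   '\x7f' + 8 bytes big-endian -> values up to 2**63 - 1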
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that contains only a 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
be suspended until write done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
'Logical connection %d is already waiting the completion '
'of write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
LogicalConnectionClosedException: when closing handshake for this
logical channel has been received.
ConnectionTerminatedException: when the physical connection has
closed, or an error is caused on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
'Receiving %d bytes failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
new_state: state to be set. new_state must be one of the following:
- STATE_GRACEFULLY_CLOSED: when closing handshake for this
connection has been received.
- STATE_TERMINATED: when the physical connection has closed or
a DropChannel for this connection has been received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
raise MuxUnexpectedException(
'Should not reach here (bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
InvalidFrameException: when an invalid opcode is received (e.g. a
non-continuation data opcode arrives while the fin flag of the
previous inner frame was not set).
"""
return self._frame_handler(frame)
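# A minimal usage sketch, for illustration only (not part of the original
# module): feeding fragmented inner frames to _InnerMessageBuilder. Frame
# and the opcode constants are the ones this module already uses; the
# payloads are invented for the example.
def _example_inner_message_reassembly():
    builder = _InnerMessageBuilder()
    # First fragment (fin=0): build() buffers it and returns None.
    first = Frame(fin=0, opcode=common.OPCODE_TEXT, payload='Hel')
    assert builder.build(first) is None
    # Final fragment: a continuation frame with fin=1 completes the
    # message and yields an _InnerMessage.
    last = Frame(fin=1, opcode=common.OPCODE_CONTINUATION, payload='lo')
    message = builder.build(last)
    assert message.opcode == common.OPCODE_TEXT
    assert message.payload == 'Hello'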
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
'frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
# An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting for a FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
In addition to calling Stream._receive_frame, this method adds the
amount of the payload to the receive quota and sends FlowControl to the
client. We need to do this here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
# Replenish one extra octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
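# A minimal sketch, for illustration only: the first-octet packing used by
# _LogicalStream._create_inner_frame above. FIN and the RSV bits occupy the
# high bits and the opcode the low four, mirroring RFC 6455 framing.
def _example_inner_frame_first_byte():
    fin, rsv1, rsv2, rsv3 = 1, 0, 0, 0
    first_byte = ((fin << 7) | (rsv1 << 6) | (rsv2 << 5) |
                  (rsv3 << 4) | common.OPCODE_TEXT)
    assert first_byte == 0x81  # an unfragmented text frame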
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
thread reads data from the same socket at the same time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
# Don't wait the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
'Failed to close the physical connection: ', e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
# Below happens only when a data message is received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
# TODO(bashi): Make sure whether we should raise an exception when
# these headers are already included.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
delta: headers to be overridden in the base.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and key in headers:
del headers[key]
else:
headers[key] = value
return headers
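# A minimal usage sketch, for illustration only: combining a delta with the
# base. An empty value removes the header from the base; any other entry
# overrides it. The header values are invented for the example.
def _example_handshake_delta():
    base = _HandshakeDeltaBase({'Host': 'example.com',
                                'Sec-WebSocket-Protocol': 'chat'})
    headers = base.create_headers({'Sec-WebSocket-Protocol': '',
                                   'X-Custom': 'v'})
    assert headers == {'Host': 'example.com', 'X-Custom': 'v'}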
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
threads; the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
MuxUnexpectedException: when the handler already started, or when
opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: target channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: target channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id not in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
message: a message that contains encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
channel_id: the channel id corresponding to the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id not in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
# vi:sts=4 sw=4 et
|
mpl-2.0
|
jdelight/django
|
django/contrib/gis/utils/wkt.py
|
589
|
1923
|
"""
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
"""
Returns the WKT text of the geometry according to the given precision (an
integer or a string). If the precision is an integer, the coordinates in
the WKT will be truncated to that many decimal places:
>>> from django.contrib.gis.geos import Point
>>> pnt = Point(5, 23)
>>> pnt.wkt
'POINT (5.0000000000000000 23.0000000000000000)'
>>> precision_wkt(pnt, 1)
'POINT(5.0 23.0)'
If the precision is a string, it must be a valid Python format string
(e.g., '%20.7f'); thus, you should know what you're doing.
"""
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, six.string_types):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join(coord_fmt % c[:2] for c in coords)
def formatted_poly(poly):
return ','.join('(%s)' % formatted_coords(r) for r in poly)
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join('(%s)' % formatted_poly(p) for p in g)
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join(''.join(wkt for wkt in formatted_geom(child)) for child in g)
else:
raise TypeError
yield ')'
return ''.join(wkt for wkt in formatted_geom(geom))
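# A minimal sketch, for illustration only, of the format-string branch
# (requires GEOS, like the doctest above):
def _example_precision_wkt_format_string():
    from django.contrib.gis.geos import Point
    # '%.1f' renders each coordinate with one decimal place.
    assert precision_wkt(Point(5, 23), '%.1f') == 'POINT(5.0 23.0)'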
|
bsd-3-clause
|
MarkWh1te/xueqiu_predict
|
p3_env/lib/python3.5/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
|
2360
|
3778
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
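# A minimal sketch, for illustration only: the wildcard rules above in
# action (hostnames invented for the example).
def _example_dnsname_match():
    assert _dnsname_match('example.com', 'EXAMPLE.com')  # case-insensitive
    assert _dnsname_match('*.example.com', 'www.example.com')
    # A wildcard matches exactly one non-empty label, so neither the bare
    # domain nor a deeper subdomain matches.
    assert not _dnsname_match('*.example.com', 'example.com')
    assert not _dnsname_match('*.example.com', 'a.b.example.com')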
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
mit
|
bigdatauniversity/edx-platform
|
common/djangoapps/cors_csrf/authentication.py
|
152
|
1357
|
"""Django Rest Framework Authentication classes for cross-domain end-points."""
from rest_framework import authentication
from cors_csrf.helpers import is_cross_domain_request_allowed, skip_cross_domain_referer_check
class SessionAuthenticationCrossDomainCsrf(authentication.SessionAuthentication):
"""Session authentication that skips the referer check over secure connections.
Django Rest Framework's `SessionAuthentication` class calls Django's
CSRF middleware implementation directly, which bypasses the middleware
stack.
This version of `SessionAuthentication` performs the same workaround
as `CorsCSRFMiddleware` to skip the referer check for whitelisted
domains over a secure connection. See `cors_csrf.middleware` for
more information.
Since this subclass overrides only the `enforce_csrf()` method,
it can be mixed in with other `SessionAuthentication` subclasses.
"""
def enforce_csrf(self, request):
"""Skip the referer check if the cross-domain request is allowed. """
if is_cross_domain_request_allowed(request):
with skip_cross_domain_referer_check(request):
return super(SessionAuthenticationCrossDomainCsrf, self).enforce_csrf(request)
else:
return super(SessionAuthenticationCrossDomainCsrf, self).enforce_csrf(request)
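# A minimal sketch, for illustration only, of the mix-in pattern the class
# docstring describes. LoggingSessionAuth is a hypothetical subclass
# invented for this example; it is not part of this package.
def _example_mixin():
    class LoggingSessionAuth(authentication.SessionAuthentication):
        def authenticate(self, request):
            # A hypothetical customization of authenticate().
            return super(LoggingSessionAuth, self).authenticate(request)

    class CrossDomainLoggingAuth(SessionAuthenticationCrossDomainCsrf,
                                 LoggingSessionAuth):
        # The MRO takes enforce_csrf() from the cross-domain class and
        # authenticate() from LoggingSessionAuth.
        pass

    return CrossDomainLoggingAuth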
|
agpl-3.0
|
tastynoodle/django
|
tests/delete/tests.py
|
4
|
13446
|
from __future__ import unicode_literals
from django.db import models, IntegrityError, connection
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from django.utils.six.moves import xrange
from .models import (R, RChild, S, T, U, A, M, MR, MRNull,
create_a, get_default_r, User, Avatar, HiddenUser, HiddenUserProfile,
M2MTo, M2MFrom, Parent, Child, Base)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').rel.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
# MRNull's fk is SET_NULL, so deleting r keeps the through row.
self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
s = S.objects.create(r=R.objects.create())
for i in xrange(2*GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertEqual(None, obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertEqual(None, a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
T.objects.create(pk=2, s=s2)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create(pk=1, r=r)
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
r.delete()
finally:
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
self.assertEqual(deletions[0], 1)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
a = Avatar.objects.get(pk=u.avatar_id)
# The below doesn't make sense... Why do we need to null out
# user.avatar if we are going to delete the user immediately afterwards,
# and there are no more cascades?
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
class FastDeleteTests(TestCase):
def test_fast_delete_fk(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc='a')
User.objects.create(avatar=a)
u2 = User.objects.create()
expected_queries = 1 if connection.features.update_can_self_select else 2
self.assertNumQueries(expected_queries,
User.objects.filter(avatar__desc='a').delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
# However, this doesn't work as child.parent access creates a query,
# and this means we will be generating extra queries (a lot for large
# querysets). This is not a fast-delete problem.
# self.assertNumQueries(2, c.delete)
c.delete()
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
|
bsd-3-clause
|
ychab/mymoney-server
|
mymoney/core/tests/utils/test_currencies.py
|
1
|
1971
|
from decimal import Decimal
from django.test import TestCase, override_settings
from mymoney.core.utils.currencies import (
format_currency, localize_signed_amount, localize_signed_amount_currency,
)
class CurrencyFormatTestCase(TestCase):
@override_settings(LANGUAGE_CODE='en-us')
def test_format_currency_en_us(self):
self.assertEqual(
format_currency(Decimal('-1547.23'), 'USD'),
"-$1,547.23",
)
@override_settings(LANGUAGE_CODE='fr-fr')
def test_format_currency_fr_fr(self):
self.assertEqual(
format_currency(Decimal('-1547.23'), 'EUR'),
'-1\xa0547,23\xa0€',
)
@override_settings(LANGUAGE_CODE='fr')
def test_format_currency_fr(self):
self.assertEqual(
format_currency(-1547, 'EUR'),
"-1\xa0547,00\xa0€",
)
@override_settings(LANGUAGE_CODE='it')
def test_format_currency_it(self):
self.assertEqual(
format_currency(-1547, 'EUR'),
"-1.547,00\xa0€",
)
@override_settings(LANGUAGE_CODE='en-us')
def test_localize_signed_amount_en_us(self):
self.assertEqual(
localize_signed_amount(Decimal('15.23')),
'+15.23',
)
@override_settings(LANGUAGE_CODE='fr-fr')
def test_localize_signed_amount_fr_fr(self):
self.assertEqual(
localize_signed_amount(Decimal('15.23')),
'+15,23',
)
@override_settings(LANGUAGE_CODE='en-us')
def test_localize_signed_amount_currency_en_us(self):
self.assertEqual(
localize_signed_amount_currency(Decimal('1547.23'), 'USD'),
"+$1,547.23",
)
@override_settings(LANGUAGE_CODE='fr-fr')
def test_localize_signed_amount_currency_fr_fr(self):
self.assertEqual(
localize_signed_amount_currency(Decimal('1547.23'), 'EUR'),
'+1\xa0547,23\xa0€',
)
|
bsd-3-clause
|
suyashphadtare/sajil-final-erp
|
erpnext/erpnext/accounts/doctype/chart_of_accounts/charts/import_from_openerp.py
|
38
|
8787
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
"""
Import chart of accounts from OpenERP sources
"""
from __future__ import unicode_literals
import os, json
import ast
from xml.etree import ElementTree as ET
from frappe.utils.csvutils import read_csv_content
from frappe.utils import cstr
import frappe
path = "/Users/nabinhait/Documents/openerp/openerp/addons"
accounts = {}
charts = {}
all_account_types = []
def go():
global accounts, charts
default_account_types = get_default_account_types()
country_dirs = []
for basepath, folders, files in os.walk(path):
basename = os.path.basename(basepath)
if basename.startswith("l10n_"):
country_dirs.append(basename)
for country_dir in country_dirs:
accounts, charts = {}, {}
country_path = os.path.join(path, country_dir)
manifest = ast.literal_eval(open(os.path.join(country_path, "__openerp__.py")).read())
data_files = manifest.get("data", []) + manifest.get("init_xml", []) + \
manifest.get("update_xml", [])
files_path = [os.path.join(country_path, d) for d in data_files]
xml_roots = get_xml_roots(files_path)
csv_content = get_csv_contents(files_path)
prefix = country_dir if csv_content else None
account_types = get_account_types(xml_roots.get("account.account.type", []),
csv_content.get("account.account.type", []), prefix)
account_types.update(default_account_types)
if xml_roots:
make_maps_for_xml(xml_roots, account_types, country_dir)
if csv_content:
make_maps_for_csv(csv_content, account_types, country_dir)
make_account_trees()
make_charts()
def get_default_account_types():
	default_types_root = []
	for fname in ["data_account_type.xml"]:
		default_types_root.append(ET.parse(os.path.join(path, "account", "data",
			fname)).getroot())
	return get_account_types(default_types_root, None, prefix="account")
def get_xml_roots(files_path):
xml_roots = frappe._dict()
for filepath in files_path:
fname = os.path.basename(filepath)
if fname.endswith(".xml"):
tree = ET.parse(filepath)
root = tree.getroot()
for node in root[0].findall("record"):
if node.get("model") in ["account.account.template",
"account.chart.template", "account.account.type"]:
xml_roots.setdefault(node.get("model"), []).append(root)
break
return xml_roots
def get_csv_contents(files_path):
csv_content = {}
for filepath in files_path:
fname = os.path.basename(filepath)
for file_type in ["account.account.template", "account.account.type",
"account.chart.template"]:
if fname.startswith(file_type) and fname.endswith(".csv"):
with open(filepath, "r") as csvfile:
try:
csv_content.setdefault(file_type, [])\
.append(read_csv_content(csvfile.read()))
					except Exception:
continue
return csv_content
def get_account_types(root_list, csv_content, prefix=None):
types = {}
account_type_map = {
'cash': 'Cash',
'bank': 'Bank',
'tr_cash': 'Cash',
'tr_bank': 'Bank',
'receivable': 'Receivable',
'tr_receivable': 'Receivable',
'account rec': 'Receivable',
'payable': 'Payable',
'tr_payable': 'Payable',
'equity': 'Equity',
'stocks': 'Stock',
'stock': 'Stock',
'tax': 'Tax',
'tr_tax': 'Tax',
'tax-out': 'Tax',
'tax-in': 'Tax',
'charges_personnel': 'Chargeable',
'fixed asset': 'Fixed Asset',
'cogs': 'Cost of Goods Sold',
}
for root in root_list:
for node in root[0].findall("record"):
if node.get("model")=="account.account.type":
data = {}
for field in node.findall("field"):
if field.get("name")=="report_type" and field.text.lower() != "none":
data["report_type"] = get_report_type(field.text.title())
if field.get("name")=="code" and field.text.lower() != "none" \
and account_type_map.get(field.text):
data["account_type"] = account_type_map[field.text]
node_id = prefix + "." + node.get("id") if prefix else node.get("id")
types[node_id] = data
	# each entry in csv_content is one parsed CSV file, with the header row first
	for content in (csv_content or []):
		if not content or content[0][0] != "id":
			continue
		for row in content[1:]:
			row_dict = dict(zip(content[0], row))
			data = {}
			if row_dict.get("report_type"):
				data["report_type"] = get_report_type(row_dict.get("report_type"))
			if row_dict.get("code") and account_type_map.get(row_dict["code"]):
				data["account_type"] = account_type_map[row_dict["code"]]
			if data and row_dict.get("id"):
				node_id = prefix + "." + row_dict["id"] if prefix else row_dict["id"]
				types[node_id] = data
return types
def get_report_type(report_type):
report_type_map = {
"asset": "Balance Sheet",
"liability": "Balance Sheet",
"equity": "Balance Sheet",
"expense": "Profit and Loss",
"income": "Profit and Loss"
}
for d in report_type_map:
if d in report_type.lower():
return report_type_map[d]
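# For illustration (matching the substring mapping above):
#   get_report_type("Balance Sheet / Asset") -> "Balance Sheet"
#   get_report_type("Income View")           -> "Profit and Loss"
#   get_report_type("None")                  -> None (no substring matches)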
def make_maps_for_xml(xml_roots, account_types, country_dir):
"""make maps for `charts` and `accounts`"""
for model, root_list in xml_roots.iteritems():
for root in root_list:
for node in root[0].findall("record"):
if node.get("model")=="account.account.template":
data = {}
for field in node.findall("field"):
if field.get("name")=="name":
data["name"] = field.text
if field.get("name")=="parent_id":
parent_id = field.get("ref") or field.get("eval")
data["parent_id"] = parent_id
if field.get("name")=="user_type":
value = field.get("ref")
if account_types.get(value, {}).get("report_type"):
data["report_type"] = account_types[value]["report_type"]
if account_types.get(value, {}).get("account_type"):
data["account_type"] = account_types[value]["account_type"]
if data["account_type"] not in all_account_types:
all_account_types.append(data["account_type"])
data["children"] = []
accounts[node.get("id")] = data
if node.get("model")=="account.chart.template":
data = {}
for field in node.findall("field"):
if field.get("name")=="name":
data["name"] = field.text
if field.get("name")=="account_root_id":
data["account_root_id"] = field.get("ref")
data["id"] = country_dir
charts.setdefault(node.get("id"), {}).update(data)
def make_account_trees():
"""build tree hierarchy"""
for id in accounts.keys():
account = accounts[id]
if account.get("parent_id"):
if accounts.get(account["parent_id"]):
accounts[account["parent_id"]]["children"].append(account)
del account["parent_id"]
# remove empty children
for id in accounts.keys():
if "children" in accounts[id] and not accounts[id].get("children"):
del accounts[id]["children"]
def make_maps_for_csv(csv_content, account_types, country_dir):
for content in csv_content.get("account.account.template", []):
for row in content[1:]:
data = dict(zip(content[0], row))
account = {
"name": data.get("name"),
"parent_id": data.get("parent_id:id") or data.get("parent_id/id"),
"children": []
}
user_type = data.get("user_type/id") or data.get("user_type:id")
if account_types.get(user_type, {}).get("report_type"):
account["report_type"] = account_types[user_type]["report_type"]
if account_types.get(user_type, {}).get("account_type"):
account["account_type"] = account_types[user_type]["account_type"]
if account["account_type"] not in all_account_types:
all_account_types.append(account["account_type"])
accounts[data.get("id")] = account
if not account.get("parent_id") and data.get("chart_template_id:id"):
chart_id = data.get("chart_template_id:id")
charts.setdefault(chart_id, {}).update({"account_root_id": data.get("id")})
for content in csv_content.get("account.chart.template", []):
for row in content[1:]:
if row:
data = dict(zip(content[0], row))
charts.setdefault(data.get("id"), {}).update({
"account_root_id": data.get("account_root_id:id") or \
data.get("account_root_id/id"),
"name": data.get("name"),
"id": country_dir
})
def make_charts():
"""write chart files in app/setup/doctype/company/charts"""
for chart_id in charts:
src = charts[chart_id]
if not src.get("name") or not src.get("account_root_id"):
continue
if not src["account_root_id"] in accounts:
continue
filename = src["id"][5:] + "_" + chart_id
print "building " + filename
chart = {}
chart["name"] = src["name"]
chart["root"] = accounts[src["account_root_id"]]
with open(os.path.join("erpnext", "accounts", "doctype", "chart_of_accounts",
"charts", filename + ".json"), "w") as chartfile:
chartfile.write(json.dumps(chart, indent=1, sort_keys=True))
if __name__=="__main__":
go()
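# A sketch of the chart JSON written by make_charts(); the account names and
# nesting below are illustrative assumptions, not real output:
#
#   {
#    "name": "France - Plan Comptable General",
#    "root": {
#     "name": "Plan comptable",
#     "children": [
#      {"name": "Capitaux", "report_type": "Balance Sheet",
#       "account_type": "Equity", "children": [...]}
#     ]
#    }
#   }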
|
agpl-3.0
|
wy1iu/sphereface
|
tools/caffe-sphereface/python/caffe/classifier.py
|
22
|
3537
|
#!/usr/bin/env python
"""
Classifier is an image classifier specialization of Net.
"""
import numpy as np
import caffe
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
if oversample:
predictions = predictions.reshape((len(predictions) / 10, 10, -1))
predictions = predictions.mean(1)
return predictions
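# A minimal usage sketch (the file names and preprocessing values below are
# illustrative assumptions, not part of this module):
#
#   import numpy as np
#   import caffe
#
#   net = caffe.Classifier('deploy.prototxt', 'weights.caffemodel',
#                          mean=np.load('mean.npy').mean(1).mean(1),
#                          channel_swap=(2, 1, 0),  # RGB -> BGR
#                          raw_scale=255,           # [0, 1] -> [0, 255]
#                          image_dims=(256, 256))
#   probs = net.predict([caffe.io.load_image('cat.jpg')])  # (1, C) probabilities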
|
mit
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
staging_dir/host/lib/scons-2.1.0/SCons/Tool/BitKeeper.py
|
21
|
2492
|
"""SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
BitKeeper to an Environment."""
def BitKeeperFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'BitKeeper', BitKeeperFactory)
env.BitKeeper = BitKeeperFactory
env['BITKEEPER'] = 'bk'
env['BITKEEPERGET'] = '$BITKEEPER get'
env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
return env.Detect('bk')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
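# A minimal SConstruct sketch (the target layout is an illustrative assumption;
# the factory itself warns that it is deprecated):
#
#   env = Environment(tools=['default', 'BitKeeper'])
#   bk = env.BitKeeper()
#   env.SourceCode('.', bk)  # fetch missing sources with '$BITKEEPERGET $TARGET'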
|
gpl-2.0
|
fullfanta/mxnet
|
python/mxnet/contrib/autograd.py
|
30
|
7040
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Autograd for NDArray."""
from __future__ import absolute_import
from __future__ import division
from array import array
import ctypes
import functools
from ..base import _LIB, check_call, string_types
from ..base import mx_uint, NDArrayHandle, c_array, c_array_buf, c_handle_array
# pylint: disable= unused-import
from ..ndarray import NDArray, zeros_like, _GRAD_REQ_MAP
def set_is_training(is_train):
"""Set status to training/not training. When training, graph will be constructed
for gradient computation. Operators will also run with ctx.is_train=True. For example,
Dropout will drop inputs randomly when is_train=True while simply passing through
if is_train=False.
Parameters
----------
is_train: bool
Returns
-------
    the previous training state before this call.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsTraining(
ctypes.c_int(is_train), ctypes.byref(prev)))
check_call(_LIB.MXAutogradSetIsRecording(
ctypes.c_int(is_train), ctypes.byref(prev)))
return bool(prev.value)
class TrainingStateScope(object):
"""Scope for managing training state.
Example::
with TrainingStateScope(True):
y = model(x)
compute_gradient([y])
"""
def __init__(self, enter_state):
self._enter_state = enter_state
self._prev = None
def __enter__(self):
self._prev = set_is_training(self._enter_state)
def __exit__(self, ptype, value, trace):
if self._prev != self._enter_state:
set_is_training(self._prev)
def train_section():
"""Returns a training scope context to be used in 'with' statement
and captures training code.
Example::
with autograd.train_section():
y = model(x)
compute_gradient([y])
metric.update(...)
optim.step(...)
"""
return TrainingStateScope(True)
def test_section():
"""Returns a testing scope context to be used in 'with' statement
and captures testing code.
Example::
with autograd.train_section():
y = model(x)
compute_gradient([y])
with autograd.test_section():
# testing, IO, gradient updates...
"""
return TrainingStateScope(False)
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: list of NDArray
gradients: list of NDArray
grad_reqs: list of string
"""
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variables),
c_handle_array(variables),
c_array_buf(mx_uint, array('I', grad_reqs)),
c_handle_array(gradients)))
def backward(outputs, out_grads=None, retain_graph=False):
"""Compute the gradients of outputs w.r.t variables.
Parameters
----------
outputs: list of NDArray
    out_grads: list of NDArray or None
    retain_graph: bool, optional
        Whether to keep the computation graph for additional backward passes.
"""
assert isinstance(outputs, (list, tuple)), \
"outputs must be a list or tuple of NDArrays"
if out_grads is None:
check_call(_LIB.MXAutogradBackward(
len(outputs),
c_handle_array(outputs),
ctypes.c_void_p(0),
ctypes.c_int(retain_graph)))
return
ograd_handles = []
for arr in out_grads:
if arr is not None:
ograd_handles.append(arr.handle)
else:
ograd_handles.append(NDArrayHandle(0))
assert len(ograd_handles) == len(outputs), \
"outputs and out_grads must have the same length"
check_call(_LIB.MXAutogradBackward(
len(outputs),
c_handle_array(outputs),
c_array(NDArrayHandle, ograd_handles),
ctypes.c_int(retain_graph)))
def compute_gradient(outputs):
"""Deprecated. Please use backward"""
backward(outputs)
def grad_and_loss(func, argnum=None):
"""Return function that computes both gradient of arguments and loss value.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a python function
A function that would compute both the gradient of arguments and loss value.
"""
@functools.wraps(func)
def wrapped(*args):
"""Wrapped function."""
variables = args
if argnum is not None:
argnum_ = argnum if isinstance(argnum, list) else [argnum]
variables = [args[i] for i in argnum_]
for x in variables:
            assert isinstance(x, NDArray), "type of autograd input should be NDArray."
grads = [zeros_like(x) for x in variables]
mark_variables(variables, grads)
with train_section():
outputs = func(*args)
compute_gradient([outputs] if isinstance(outputs, NDArray) else outputs)
return grads, outputs
return wrapped
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped
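# A minimal end-to-end sketch (the input values and quadratic loss are
# illustrative assumptions):
#
#   import mxnet.ndarray as nd
#   from mxnet.contrib import autograd
#
#   def loss(x):
#       return nd.sum(x * x)
#
#   grad_func = autograd.grad_and_loss(loss)
#   grads, value = grad_func(nd.array([[1, 2], [3, 4]]))
#   # grads[0] equals 2 * x, since d(sum(x^2))/dx = 2x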
|
apache-2.0
|
tinkhaven-organization/odoo
|
addons/email_template/wizard/email_template_preview.py
|
377
|
3851
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "email.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
        Return records of the selected email template's model.
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
            result['res_id'] = records and records[0][0] or False  # select the first record as a default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
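# A minimal server-side usage sketch (the ids below are illustrative
# assumptions; the wizard is normally driven from the template preview UI):
#
#   ctx = {'template_id': 42}
#   preview = self.pool.get('email_template.preview')
#   res = preview.on_change_res_id(cr, uid, [], res_id=7, context=ctx)
#   rendered_subject = res['value']['subject']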
|
agpl-3.0
|
badloop/SickRage
|
sickbeard/__init__.py
|
1
|
114674
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import webbrowser
import datetime
import socket
import os
import re
import os.path
import shutil
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
from threading import Lock
import sys
from github import Github
from sickbeard import metadata
from sickbeard import providers
from sickbeard.providers.generic import GenericProvider
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
naming_ep_type
from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
subtitles, traktChecker, numdict
from sickbeard import db
from sickbeard import helpers
from sickbeard import scheduler
from sickbeard import search_queue
from sickbeard import show_queue
from sickbeard import logger
from sickbeard import naming
from sickbeard import dailysearcher
from sickbeard.indexers import indexer_api
from sickbeard.indexers.indexer_exceptions import indexer_shownotfound, indexer_showincomplete, indexer_exception, indexer_error, \
indexer_episodenotfound, indexer_attributenotfound, indexer_seasonnotfound, indexer_userabort, indexerExcepts
from sickbeard.common import SD
from sickbeard.common import SKIPPED
from sickbeard.common import WANTED
from sickbeard.databases import mainDB, cache_db, failed_db
from sickrage.helper.exceptions import ex
from sickrage.system.Shutdown import Shutdown
from configobj import ConfigObj
import requests
requests.packages.urllib3.disable_warnings()
indexerApi = indexer_api.indexerApi
PID = None
CFG = None
CONFIG_FILE = None
# This is the version of the config we EXPECT to find
CONFIG_VERSION = 7
# Default encryption version (0 for None)
ENCRYPTION_VERSION = 0
ENCRYPTION_SECRET = None
PROG_DIR = '.'
MY_FULLNAME = None
MY_NAME = None
MY_ARGS = []
SYS_ENCODING = ''
DATA_DIR = ''
CREATEPID = False
PIDFILE = ''
DAEMON = None
NO_RESIZE = False
# system events
events = None
# github
gh = None
# schedulers
dailySearchScheduler = None
backlogSearchScheduler = None
showUpdateScheduler = None
versionCheckScheduler = None
showQueueScheduler = None
searchQueueScheduler = None
properFinderScheduler = None
autoPostProcesserScheduler = None
subtitlesFinderScheduler = None
traktCheckerScheduler = None
showList = None
loadingShowList = None
providerList = []
newznabProviderList = []
torrentRssProviderList = []
metadata_provider_dict = {}
NEWEST_VERSION = None
NEWEST_VERSION_STRING = None
VERSION_NOTIFY = False
AUTO_UPDATE = False
NOTIFY_ON_UPDATE = False
CUR_COMMIT_HASH = None
BRANCH = ''
GIT_RESET = True
GIT_REMOTE = ''
GIT_REMOTE_URL = ''
CUR_COMMIT_BRANCH = ''
GIT_ORG = 'SickRage'
GIT_REPO = 'SickRage'
GIT_USERNAME = None
GIT_PASSWORD = None
GIT_PATH = None
GIT_AUTOISSUES = False
GIT_NEWVER = False
DEVELOPER = False
NEWS_URL = 'http://sickrage.github.io/sickrage-news/news.md'
NEWS_LAST_READ = None
NEWS_LATEST = None
NEWS_UNREAD = 0
INIT_LOCK = Lock()
started = False
ACTUAL_LOG_DIR = None
LOG_DIR = None
LOG_NR = 5
LOG_SIZE = 1048576
SOCKET_TIMEOUT = None
WEB_PORT = None
WEB_LOG = None
WEB_ROOT = None
WEB_USERNAME = None
WEB_PASSWORD = None
WEB_HOST = None
WEB_IPV6 = None
WEB_COOKIE_SECRET = None
WEB_USE_GZIP = True
DOWNLOAD_URL = None
HANDLE_REVERSE_PROXY = False
PROXY_SETTING = None
PROXY_INDEXERS = True
SSL_VERIFY = True
LOCALHOST_IP = None
CPU_PRESET = None
ANON_REDIRECT = None
API_KEY = None
API_ROOT = None
ENABLE_HTTPS = False
HTTPS_CERT = None
HTTPS_KEY = None
INDEXER_DEFAULT_LANGUAGE = None
EP_DEFAULT_DELETED_STATUS = None
LAUNCH_BROWSER = False
CACHE_DIR = None
ACTUAL_CACHE_DIR = None
ROOT_DIRS = None
TRASH_REMOVE_SHOW = False
TRASH_ROTATE_LOGS = False
SORT_ARTICLE = False
DEBUG = False
DISPLAY_ALL_SEASONS = True
DEFAULT_PAGE = 'home'
USE_LISTVIEW = False
METADATA_KODI = None
METADATA_KODI_12PLUS = None
METADATA_MEDIABROWSER = None
METADATA_PS3 = None
METADATA_WDTV = None
METADATA_TIVO = None
METADATA_MEDE8ER = None
QUALITY_DEFAULT = None
STATUS_DEFAULT = None
STATUS_DEFAULT_AFTER = None
FLATTEN_FOLDERS_DEFAULT = False
SUBTITLES_DEFAULT = False
INDEXER_DEFAULT = None
INDEXER_TIMEOUT = None
SCENE_DEFAULT = False
ANIME_DEFAULT = False
ARCHIVE_DEFAULT = False
PROVIDER_ORDER = []
NAMING_MULTI_EP = False
NAMING_ANIME_MULTI_EP = False
NAMING_PATTERN = None
NAMING_ABD_PATTERN = None
NAMING_CUSTOM_ABD = False
NAMING_SPORTS_PATTERN = None
NAMING_CUSTOM_SPORTS = False
NAMING_ANIME_PATTERN = None
NAMING_CUSTOM_ANIME = False
NAMING_FORCE_FOLDERS = False
NAMING_STRIP_YEAR = False
NAMING_ANIME = None
USE_NZBS = False
USE_TORRENTS = False
NZB_METHOD = None
NZB_DIR = None
USENET_RETENTION = None
TORRENT_METHOD = None
TORRENT_DIR = None
DOWNLOAD_PROPERS = False
CHECK_PROPERS_INTERVAL = None
ALLOW_HIGH_PRIORITY = False
SAB_FORCED = False
RANDOMIZE_PROVIDERS = False
AUTOPOSTPROCESSER_FREQUENCY = None
DAILYSEARCH_FREQUENCY = None
UPDATE_FREQUENCY = None
BACKLOG_FREQUENCY = None
SHOWUPDATE_HOUR = None
DEFAULT_AUTOPOSTPROCESSER_FREQUENCY = 10
DEFAULT_DAILYSEARCH_FREQUENCY = 40
DEFAULT_BACKLOG_FREQUENCY = 21
DEFAULT_UPDATE_FREQUENCY = 1
DEFAULT_SHOWUPDATE_HOUR = 3
MIN_AUTOPOSTPROCESSER_FREQUENCY = 1
MIN_DAILYSEARCH_FREQUENCY = 10
MIN_BACKLOG_FREQUENCY = 10
MIN_UPDATE_FREQUENCY = 1
BACKLOG_DAYS = 7
ADD_SHOWS_WO_DIR = False
CREATE_MISSING_SHOW_DIRS = False
RENAME_EPISODES = False
AIRDATE_EPISODES = False
FILE_TIMESTAMP_TIMEZONE = None
PROCESS_AUTOMATICALLY = False
NO_DELETE = False
KEEP_PROCESSED_DIR = False
PROCESS_METHOD = None
DELRARCONTENTS = False
MOVE_ASSOCIATED_FILES = False
POSTPONE_IF_SYNC_FILES = True
NFO_RENAME = True
TV_DOWNLOAD_DIR = None
UNPACK = False
SKIP_REMOVED_FILES = False
NZBS = False
NZBS_UID = None
NZBS_HASH = None
OMGWTFNZBS = False
OMGWTFNZBS_USERNAME = None
OMGWTFNZBS_APIKEY = None
NEWZBIN = False
NEWZBIN_USERNAME = None
NEWZBIN_PASSWORD = None
SAB_USERNAME = None
SAB_PASSWORD = None
SAB_APIKEY = None
SAB_CATEGORY = None
SAB_CATEGORY_BACKLOG = None
SAB_CATEGORY_ANIME = None
SAB_CATEGORY_ANIME_BACKLOG = None
SAB_HOST = ''
NZBGET_USERNAME = None
NZBGET_PASSWORD = None
NZBGET_CATEGORY = None
NZBGET_CATEGORY_BACKLOG = None
NZBGET_CATEGORY_ANIME = None
NZBGET_CATEGORY_ANIME_BACKLOG = None
NZBGET_HOST = None
NZBGET_USE_HTTPS = False
NZBGET_PRIORITY = 100
TORRENT_USERNAME = None
TORRENT_PASSWORD = None
TORRENT_HOST = ''
TORRENT_PATH = ''
TORRENT_SEED_TIME = None
TORRENT_PAUSED = False
TORRENT_HIGH_BANDWIDTH = False
TORRENT_LABEL = ''
TORRENT_LABEL_ANIME = ''
TORRENT_VERIFY_CERT = False
TORRENT_RPCURL = 'transmission'
TORRENT_AUTH_TYPE = 'none'
USE_KODI = False
KODI_ALWAYS_ON = True
KODI_NOTIFY_ONSNATCH = False
KODI_NOTIFY_ONDOWNLOAD = False
KODI_NOTIFY_ONSUBTITLEDOWNLOAD = False
KODI_UPDATE_LIBRARY = False
KODI_UPDATE_FULL = False
KODI_UPDATE_ONLYFIRST = False
KODI_HOST = ''
KODI_USERNAME = None
KODI_PASSWORD = None
USE_PLEX = False
PLEX_NOTIFY_ONSNATCH = False
PLEX_NOTIFY_ONDOWNLOAD = False
PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = False
PLEX_UPDATE_LIBRARY = False
PLEX_SERVER_HOST = None
PLEX_SERVER_TOKEN = None
PLEX_HOST = None
PLEX_USERNAME = None
PLEX_PASSWORD = None
USE_PLEX_CLIENT = False
PLEX_CLIENT_USERNAME = None
PLEX_CLIENT_PASSWORD = None
USE_EMBY = False
EMBY_HOST = None
EMBY_APIKEY = None
USE_GROWL = False
GROWL_NOTIFY_ONSNATCH = False
GROWL_NOTIFY_ONDOWNLOAD = False
GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = False
GROWL_HOST = ''
GROWL_PASSWORD = None
USE_FREEMOBILE = False
FREEMOBILE_NOTIFY_ONSNATCH = False
FREEMOBILE_NOTIFY_ONDOWNLOAD = False
FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = False
FREEMOBILE_ID = ''
FREEMOBILE_APIKEY = ''
USE_PROWL = False
PROWL_NOTIFY_ONSNATCH = False
PROWL_NOTIFY_ONDOWNLOAD = False
PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = False
PROWL_API = None
PROWL_PRIORITY = 0
USE_TWITTER = False
TWITTER_NOTIFY_ONSNATCH = False
TWITTER_NOTIFY_ONDOWNLOAD = False
TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = False
TWITTER_USERNAME = None
TWITTER_PASSWORD = None
TWITTER_PREFIX = None
TWITTER_DMTO = None
TWITTER_USEDM = False
USE_BOXCAR = False
BOXCAR_NOTIFY_ONSNATCH = False
BOXCAR_NOTIFY_ONDOWNLOAD = False
BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = False
BOXCAR_USERNAME = None
BOXCAR_PASSWORD = None
BOXCAR_PREFIX = None
USE_BOXCAR2 = False
BOXCAR2_NOTIFY_ONSNATCH = False
BOXCAR2_NOTIFY_ONDOWNLOAD = False
BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = False
BOXCAR2_ACCESSTOKEN = None
USE_PUSHOVER = False
PUSHOVER_NOTIFY_ONSNATCH = False
PUSHOVER_NOTIFY_ONDOWNLOAD = False
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHOVER_USERKEY = None
PUSHOVER_APIKEY = None
PUSHOVER_DEVICE = None
PUSHOVER_SOUND = None
USE_LIBNOTIFY = False
LIBNOTIFY_NOTIFY_ONSNATCH = False
LIBNOTIFY_NOTIFY_ONDOWNLOAD = False
LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = False
USE_NMJ = False
NMJ_HOST = None
NMJ_DATABASE = None
NMJ_MOUNT = None
ANIMESUPPORT = False
USE_ANIDB = False
ANIDB_USERNAME = None
ANIDB_PASSWORD = None
ANIDB_USE_MYLIST = False
ADBA_CONNECTION = None
ANIME_SPLIT_HOME = False
USE_SYNOINDEX = False
USE_NMJv2 = False
NMJv2_HOST = None
NMJv2_DATABASE = None
NMJv2_DBLOC = None
USE_SYNOLOGYNOTIFIER = False
SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = False
SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = False
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = False
USE_TRAKT = False
TRAKT_USERNAME = None
TRAKT_ACCESS_TOKEN = None
TRAKT_REFRESH_TOKEN = None
TRAKT_REMOVE_WATCHLIST = False
TRAKT_REMOVE_SERIESLIST = False
TRAKT_REMOVE_SHOW_FROM_SICKRAGE = False
TRAKT_SYNC_WATCHLIST = False
TRAKT_METHOD_ADD = None
TRAKT_START_PAUSED = False
TRAKT_USE_RECOMMENDED = False
TRAKT_SYNC = False
TRAKT_SYNC_REMOVE = False
TRAKT_DEFAULT_INDEXER = None
TRAKT_TIMEOUT = None
TRAKT_BLACKLIST_NAME = None
USE_PYTIVO = False
PYTIVO_NOTIFY_ONSNATCH = False
PYTIVO_NOTIFY_ONDOWNLOAD = False
PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = False
PYTIVO_UPDATE_LIBRARY = False
PYTIVO_HOST = ''
PYTIVO_SHARE_NAME = ''
PYTIVO_TIVO_NAME = ''
USE_NMA = False
NMA_NOTIFY_ONSNATCH = False
NMA_NOTIFY_ONDOWNLOAD = False
NMA_NOTIFY_ONSUBTITLEDOWNLOAD = False
NMA_API = None
NMA_PRIORITY = 0
USE_PUSHALOT = False
PUSHALOT_NOTIFY_ONSNATCH = False
PUSHALOT_NOTIFY_ONDOWNLOAD = False
PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHALOT_AUTHORIZATIONTOKEN = None
USE_PUSHBULLET = False
PUSHBULLET_NOTIFY_ONSNATCH = False
PUSHBULLET_NOTIFY_ONDOWNLOAD = False
PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHBULLET_API = None
PUSHBULLET_DEVICE = None
USE_EMAIL = False
EMAIL_NOTIFY_ONSNATCH = False
EMAIL_NOTIFY_ONDOWNLOAD = False
EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = False
EMAIL_HOST = None
EMAIL_PORT = 25
EMAIL_TLS = False
EMAIL_USER = None
EMAIL_PASSWORD = None
EMAIL_FROM = None
EMAIL_LIST = None
GUI_NAME = None
HOME_LAYOUT = None
HISTORY_LAYOUT = None
HISTORY_LIMIT = 0
DISPLAY_SHOW_SPECIALS = False
COMING_EPS_LAYOUT = None
COMING_EPS_DISPLAY_PAUSED = False
COMING_EPS_SORT = None
COMING_EPS_MISSED_RANGE = None
FUZZY_DATING = False
TRIM_ZERO = False
DATE_PRESET = None
TIME_PRESET = None
TIME_PRESET_W_SECONDS = None
TIMEZONE_DISPLAY = None
THEME_NAME = None
POSTER_SORTBY = None
POSTER_SORTDIR = None
FILTER_ROW = True
USE_SUBTITLES = False
SUBTITLES_LANGUAGES = []
SUBTITLES_DIR = ''
SUBTITLES_SERVICES_LIST = []
SUBTITLES_SERVICES_ENABLED = []
SUBTITLES_HISTORY = False
EMBEDDED_SUBTITLES_ALL = False
SUBTITLES_HEARING_IMPAIRED = False
SUBTITLES_FINDER_FREQUENCY = 1
SUBTITLES_MULTI = False
SUBTITLES_EXTRA_SCRIPTS = []
ADDIC7ED_USER = None
ADDIC7ED_PASS = None
OPENSUBTITLES_USER = None
OPENSUBTITLES_PASS = None
LEGENDASTV_USER = None
LEGENDASTV_PASS = None
USE_FAILED_DOWNLOADS = False
DELETE_FAILED = False
EXTRA_SCRIPTS = []
IGNORE_WORDS = "german,french,core2hd,dutch,swedish,reenc,MrLss"
REQUIRE_WORDS = ""
IGNORED_SUBS_LIST = "dk,fin,heb,kor,nor,nordic,pl,swe"
SYNC_FILES = "!sync,lftp-pget-status,part,bts,!qb"
CALENDAR_UNPROTECTED = False
CALENDAR_ICONS = False
NO_RESTART = False
TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
# TRAKT_API_KEY = 'd4161a7a106424551add171e5470112e4afdaf2438e6ef2fe0548edc75924868'
TRAKT_API_KEY = '5c65f55e11d48c35385d9e8670615763a605fad28374c8ae553a7b7a50651ddd'
TRAKT_API_SECRET = 'b53e32045ac122a445ef163e6d859403301ffe9b17fb8321d428531b69022a82'
TRAKT_PIN_URL = 'https://trakt.tv/pin/4562'
TRAKT_OAUTH_URL = 'https://trakt.tv/'
TRAKT_API_URL = 'https://api-v2launch.trakt.tv/'
FANART_API_KEY = '9b3afaf26f6241bdb57d6cc6bd798da7'
SHOWS_RECENT = []
__INITIALIZED__ = False
NEWZNAB_DATA = None
def get_backlog_cycle_time():
cycletime = DAILYSEARCH_FREQUENCY * 2 + 7
return max([cycletime, 720])
def initialize(consoleLogging=True):
with INIT_LOCK:
global BRANCH, GIT_RESET, GIT_REMOTE, GIT_REMOTE_URL, CUR_COMMIT_HASH, CUR_COMMIT_BRANCH, GIT_NEWVER, ACTUAL_LOG_DIR, LOG_DIR, LOG_NR, LOG_SIZE, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, ENCRYPTION_SECRET, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, WEB_COOKIE_SECRET, WEB_USE_GZIP, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, RANDOMIZE_PROVIDERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, SAB_FORCED, TORRENT_METHOD, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_CATEGORY_BACKLOG, SAB_CATEGORY_ANIME, SAB_CATEGORY_ANIME_BACKLOG, SAB_HOST, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_CATEGORY_BACKLOG, NZBGET_CATEGORY_ANIME, NZBGET_CATEGORY_ANIME_BACKLOG, NZBGET_PRIORITY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_LABEL_ANIME, TORRENT_VERIFY_CERT, TORRENT_RPCURL, TORRENT_AUTH_TYPE, \
USE_KODI, KODI_ALWAYS_ON, KODI_NOTIFY_ONSNATCH, KODI_NOTIFY_ONDOWNLOAD, KODI_NOTIFY_ONSUBTITLEDOWNLOAD, KODI_UPDATE_FULL, KODI_UPDATE_ONLYFIRST, \
KODI_UPDATE_LIBRARY, KODI_HOST, KODI_USERNAME, KODI_PASSWORD, BACKLOG_FREQUENCY, \
USE_TRAKT, TRAKT_USERNAME, TRAKT_ACCESS_TOKEN, TRAKT_REFRESH_TOKEN, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_REMOVE_SHOW_FROM_SICKRAGE, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_SYNC_REMOVE, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, \
USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, USE_PLEX_CLIENT, PLEX_CLIENT_USERNAME, PLEX_CLIENT_PASSWORD, \
PLEX_SERVER_HOST, PLEX_SERVER_TOKEN, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, MIN_BACKLOG_FREQUENCY, SKIP_REMOVED_FILES, \
USE_EMBY, EMBY_HOST, EMBY_APIKEY, \
showUpdateScheduler, __INITIALIZED__, INDEXER_DEFAULT_LANGUAGE, EP_DEFAULT_DELETED_STATUS, LAUNCH_BROWSER, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, INDEXER_DEFAULT, INDEXER_TIMEOUT, USENET_RETENTION, TORRENT_DIR, \
QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, STATUS_DEFAULT, STATUS_DEFAULT_AFTER, \
GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, GROWL_NOTIFY_ONSUBTITLEDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD, USE_FREEMOBILE, FREEMOBILE_ID, FREEMOBILE_APIKEY, FREEMOBILE_NOTIFY_ONSNATCH, FREEMOBILE_NOTIFY_ONDOWNLOAD, FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD, \
USE_GROWL, GROWL_HOST, GROWL_PASSWORD, USE_PROWL, PROWL_NOTIFY_ONSNATCH, PROWL_NOTIFY_ONDOWNLOAD, PROWL_NOTIFY_ONSUBTITLEDOWNLOAD, PROWL_API, PROWL_PRIORITY, \
USE_PYTIVO, PYTIVO_NOTIFY_ONSNATCH, PYTIVO_NOTIFY_ONDOWNLOAD, PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD, PYTIVO_UPDATE_LIBRARY, PYTIVO_HOST, PYTIVO_SHARE_NAME, PYTIVO_TIVO_NAME, \
USE_NMA, NMA_NOTIFY_ONSNATCH, NMA_NOTIFY_ONDOWNLOAD, NMA_NOTIFY_ONSUBTITLEDOWNLOAD, NMA_API, NMA_PRIORITY, \
USE_PUSHALOT, PUSHALOT_NOTIFY_ONSNATCH, PUSHALOT_NOTIFY_ONDOWNLOAD, PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHALOT_AUTHORIZATIONTOKEN, \
USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHBULLET_API, PUSHBULLET_DEVICE, \
versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, NOTIFY_ON_UPDATE, PROCESS_AUTOMATICALLY, NO_DELETE, UNPACK, CPU_PRESET, \
KEEP_PROCESSED_DIR, PROCESS_METHOD, DELRARCONTENTS, TV_DOWNLOAD_DIR, UPDATE_FREQUENCY, \
showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, TIMEZONE_DISPLAY, \
NAMING_PATTERN, NAMING_MULTI_EP, NAMING_ANIME_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, NAMING_SPORTS_PATTERN, NAMING_CUSTOM_SPORTS, NAMING_ANIME_PATTERN, NAMING_CUSTOM_ANIME, NAMING_STRIP_YEAR, \
RENAME_EPISODES, AIRDATE_EPISODES, FILE_TIMESTAMP_TIMEZONE, properFinderScheduler, PROVIDER_ORDER, autoPostProcesserScheduler, \
providerList, newznabProviderList, torrentRssProviderList, \
EXTRA_SCRIPTS, USE_TWITTER, TWITTER_USERNAME, TWITTER_PASSWORD, TWITTER_PREFIX, DAILYSEARCH_FREQUENCY, TWITTER_DMTO, TWITTER_USEDM, \
USE_BOXCAR, BOXCAR_USERNAME, BOXCAR_NOTIFY_ONDOWNLOAD, BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD, BOXCAR_NOTIFY_ONSNATCH, \
USE_BOXCAR2, BOXCAR2_ACCESSTOKEN, BOXCAR2_NOTIFY_ONDOWNLOAD, BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD, BOXCAR2_NOTIFY_ONSNATCH, \
USE_PUSHOVER, PUSHOVER_USERKEY, PUSHOVER_APIKEY, PUSHOVER_DEVICE, PUSHOVER_NOTIFY_ONDOWNLOAD, PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHOVER_NOTIFY_ONSNATCH, PUSHOVER_SOUND, \
USE_LIBNOTIFY, LIBNOTIFY_NOTIFY_ONSNATCH, LIBNOTIFY_NOTIFY_ONDOWNLOAD, LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD, USE_NMJ, NMJ_HOST, NMJ_DATABASE, NMJ_MOUNT, USE_NMJv2, NMJv2_HOST, NMJv2_DATABASE, NMJv2_DBLOC, USE_SYNOINDEX, \
USE_SYNOLOGYNOTIFIER, SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH, SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD, SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD, \
USE_EMAIL, EMAIL_HOST, EMAIL_PORT, EMAIL_TLS, EMAIL_USER, EMAIL_PASSWORD, EMAIL_FROM, EMAIL_NOTIFY_ONSNATCH, EMAIL_NOTIFY_ONDOWNLOAD, EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD, EMAIL_LIST, \
USE_LISTVIEW, METADATA_KODI, METADATA_KODI_12PLUS, METADATA_MEDIABROWSER, METADATA_PS3, metadata_provider_dict, \
NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, SYNC_FILES, POSTPONE_IF_SYNC_FILES, dailySearchScheduler, NFO_RENAME, \
GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, THEME_NAME, FILTER_ROW, \
POSTER_SORTBY, POSTER_SORTDIR, HISTORY_LIMIT, CREATE_MISSING_SHOW_DIRS, ADD_SHOWS_WO_DIR, \
METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, IGNORED_SUBS_LIST, REQUIRE_WORDS, CALENDAR_UNPROTECTED, CALENDAR_ICONS, NO_RESTART, \
USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, EMBEDDED_SUBTITLES_ALL, SUBTITLES_EXTRA_SCRIPTS, subtitlesFinderScheduler, \
SUBTITLES_HEARING_IMPAIRED, ADDIC7ED_USER, ADDIC7ED_PASS, LEGENDASTV_USER, LEGENDASTV_PASS, OPENSUBTITLES_USER, OPENSUBTITLES_PASS, \
USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, DEBUG, DEFAULT_PAGE, PROXY_SETTING, PROXY_INDEXERS, \
AUTOPOSTPROCESSER_FREQUENCY, SHOWUPDATE_HOUR, \
ANIME_DEFAULT, NAMING_ANIME, ANIMESUPPORT, USE_ANIDB, ANIDB_USERNAME, ANIDB_PASSWORD, ANIDB_USE_MYLIST, \
ANIME_SPLIT_HOME, SCENE_DEFAULT, ARCHIVE_DEFAULT, DOWNLOAD_URL, BACKLOG_DAYS, GIT_USERNAME, GIT_PASSWORD, \
GIT_AUTOISSUES, DEVELOPER, gh, DISPLAY_ALL_SEASONS, SSL_VERIFY, NEWS_LAST_READ, NEWS_LATEST, SOCKET_TIMEOUT
if __INITIALIZED__:
return False
CheckSection(CFG, 'General')
CheckSection(CFG, 'Blackhole')
CheckSection(CFG, 'Newzbin')
CheckSection(CFG, 'SABnzbd')
CheckSection(CFG, 'NZBget')
CheckSection(CFG, 'KODI')
CheckSection(CFG, 'PLEX')
CheckSection(CFG, 'Emby')
CheckSection(CFG, 'Growl')
CheckSection(CFG, 'Prowl')
CheckSection(CFG, 'Twitter')
CheckSection(CFG, 'Boxcar')
CheckSection(CFG, 'Boxcar2')
CheckSection(CFG, 'NMJ')
CheckSection(CFG, 'NMJv2')
CheckSection(CFG, 'Synology')
CheckSection(CFG, 'SynologyNotifier')
CheckSection(CFG, 'pyTivo')
CheckSection(CFG, 'NMA')
CheckSection(CFG, 'Pushalot')
CheckSection(CFG, 'Pushbullet')
CheckSection(CFG, 'Subtitles')
CheckSection(CFG, 'pyTivo')
        # Needs to be read before any passwords
ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
ENCRYPTION_SECRET = check_setting_str(CFG, 'General', 'encryption_secret', helpers.generateCookieSecret(), censor_log=True)
GIT_AUTOISSUES = bool(check_setting_int(CFG, 'General', 'git_autoissues', 0))
# git login info
GIT_USERNAME = check_setting_str(CFG, 'General', 'git_username', '')
GIT_PASSWORD = check_setting_str(CFG, 'General', 'git_password', '', censor_log=True)
GIT_NEWVER = bool(check_setting_int(CFG, 'General', 'git_newver', 0))
DEVELOPER = bool(check_setting_int(CFG, 'General', 'developer', 0))
# debugging
DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))
DEFAULT_PAGE = check_setting_str(CFG, 'General', 'default_page', 'home')
if DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
DEFAULT_PAGE = 'home'
ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))
        LOG_NR = check_setting_int(CFG, 'General', 'log_nr', 5)  # Default to 5 backup files (sickrage.log.x)
LOG_SIZE = check_setting_int(CFG, 'General', 'log_size', 1048576) # Default to max 1MB per logfile
fileLogging = True
if not helpers.makeDir(LOG_DIR):
sys.stderr.write("!!! No log folder, logging to screen only!\n")
fileLogging = False
# init logging
logger.initLogging(consoleLogging=consoleLogging, fileLogging=fileLogging, debugLogging=DEBUG)
# github api
try:
if not (GIT_USERNAME and GIT_PASSWORD):
gh = Github(user_agent="SiCKRAGE").get_organization(GIT_ORG).get_repo(GIT_REPO)
else:
gh = Github(login_or_token=GIT_USERNAME, password=GIT_PASSWORD, user_agent="SiCKRAGE").get_organization(GIT_ORG).get_repo(GIT_REPO)
except Exception as e:
gh = None
logger.log(u'Unable to setup GitHub properly. GitHub will not be available. Error: %s' % ex(e), logger.WARNING)
# git reset on update
GIT_RESET = bool(check_setting_int(CFG, 'General', 'git_reset', 1))
# current git branch
BRANCH = check_setting_str(CFG, 'General', 'branch', '')
# git_remote
GIT_REMOTE = check_setting_str(CFG, 'General', 'git_remote', 'origin')
GIT_REMOTE_URL = check_setting_str(CFG, 'General', 'git_remote_url',
'https://github.com/%s/%s.git' % (GIT_ORG, GIT_REPO))
if 'sickragetv' in GIT_REMOTE_URL.lower():
GIT_REMOTE_URL = 'https://github.com/SickRage/SickRage.git'
# current commit hash
CUR_COMMIT_HASH = check_setting_str(CFG, 'General', 'cur_commit_hash', '')
# current commit branch
CUR_COMMIT_BRANCH = check_setting_str(CFG, 'General', 'cur_commit_branch', '')
ACTUAL_CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', 'cache')
# fix bad configs due to buggy code
if ACTUAL_CACHE_DIR == 'None':
ACTUAL_CACHE_DIR = 'cache'
# unless they specify, put the cache dir inside the data dir
if not os.path.isabs(ACTUAL_CACHE_DIR):
CACHE_DIR = os.path.join(DATA_DIR, ACTUAL_CACHE_DIR)
else:
CACHE_DIR = ACTUAL_CACHE_DIR
if not helpers.makeDir(CACHE_DIR):
logger.log(u"!!! Creating local cache dir failed, using system default", logger.ERROR)
CACHE_DIR = None
# Check if we need to perform a restore of the cache folder
try:
restoreDir = os.path.join(DATA_DIR, 'restore')
if os.path.exists(restoreDir) and os.path.exists(os.path.join(restoreDir, 'cache')):
def restoreCache(srcDir, dstDir):
def path_leaf(path):
head, tail = os.path.split(path)
return tail or os.path.basename(head)
try:
if os.path.isdir(dstDir):
bakFilename = '{0}-{1}'.format(path_leaf(dstDir), datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M%S'))
shutil.move(dstDir, os.path.join(os.path.dirname(dstDir), bakFilename))
shutil.move(srcDir, dstDir)
logger.log(u"Restore: restoring cache successful", logger.INFO)
except Exception as e:
logger.log(u"Restore: restoring cache failed: {0}".format(str(e)), logger.ERROR)
restoreCache(os.path.join(restoreDir, 'cache'), CACHE_DIR)
except Exception as e:
logger.log(u"Restore: restoring cache failed: {0}".format(ex(e)), logger.ERROR)
finally:
if os.path.exists(os.path.join(DATA_DIR, 'restore')):
try:
shutil.rmtree(os.path.join(DATA_DIR, 'restore'))
except Exception as e:
logger.log(u"Restore: Unable to remove the restore directory: {0}".format(ex(e)), logger.ERROR)
for cleanupDir in ['mako', 'sessions', 'indexers']:
try:
shutil.rmtree(os.path.join(CACHE_DIR, cleanupDir))
except Exception as e:
logger.log(u"Restore: Unable to remove the cache/{0} directory: {1}".format(cleanupDir, ex(e)), logger.WARNING)
GUI_NAME = check_setting_str(CFG, 'GUI', 'gui_name', 'slick')
THEME_NAME = check_setting_str(CFG, 'GUI', 'theme_name', 'dark')
SOCKET_TIMEOUT = check_setting_int(CFG, 'General', 'socket_timeout', 30)
socket.setdefaulttimeout(SOCKET_TIMEOUT)
try:
WEB_PORT = check_setting_int(CFG, 'General', 'web_port', 8081)
except Exception:
WEB_PORT = 8081
if WEB_PORT < 21 or WEB_PORT > 65535:
WEB_PORT = 8081
WEB_HOST = check_setting_str(CFG, 'General', 'web_host', '0.0.0.0')
WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0))
WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/")
WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0))
WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '', censor_log=True)
WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '', censor_log=True)
WEB_COOKIE_SECRET = check_setting_str(CFG, 'General', 'web_cookie_secret', helpers.generateCookieSecret(), censor_log=True)
if not WEB_COOKIE_SECRET:
WEB_COOKIE_SECRET = helpers.generateCookieSecret()
WEB_USE_GZIP = bool(check_setting_int(CFG, 'General', 'web_use_gzip', 1))
SSL_VERIFY = bool(check_setting_int(CFG, 'General', 'ssl_verify', 1))
INDEXER_DEFAULT_LANGUAGE = check_setting_str(CFG, 'General', 'indexerDefaultLang', 'en')
EP_DEFAULT_DELETED_STATUS = check_setting_int(CFG, 'General', 'ep_default_deleted_status', 6)
LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
DOWNLOAD_URL = check_setting_str(CFG, 'General', 'download_url', "")
LOCALHOST_IP = check_setting_str(CFG, 'General', 'localhost_ip', '')
CPU_PRESET = check_setting_str(CFG, 'General', 'cpu_preset', 'NORMAL')
ANON_REDIRECT = check_setting_str(CFG, 'General', 'anon_redirect', 'http://dereferer.org/?')
PROXY_SETTING = check_setting_str(CFG, 'General', 'proxy_setting', '')
PROXY_INDEXERS = bool(check_setting_int(CFG, 'General', 'proxy_indexers', 1))
# attempt to help prevent users from breaking links by using a bad url
if not ANON_REDIRECT.endswith('?'):
ANON_REDIRECT = ''
TRASH_REMOVE_SHOW = bool(check_setting_int(CFG, 'General', 'trash_remove_show', 0))
TRASH_ROTATE_LOGS = bool(check_setting_int(CFG, 'General', 'trash_rotate_logs', 0))
SORT_ARTICLE = bool(check_setting_int(CFG, 'General', 'sort_article', 0))
API_KEY = check_setting_str(CFG, 'General', 'api_key', '', censor_log=True)
ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt')
HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', 'server.key')
HANDLE_REVERSE_PROXY = bool(check_setting_int(CFG, 'General', 'handle_reverse_proxy', 0))
ROOT_DIRS = check_setting_str(CFG, 'General', 'root_dirs', '')
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
ROOT_DIRS = ''
QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
STATUS_DEFAULT_AFTER = check_setting_int(CFG, 'General', 'status_default_after', WANTED)
VERSION_NOTIFY = bool(check_setting_int(CFG, 'General', 'version_notify', 1))
AUTO_UPDATE = bool(check_setting_int(CFG, 'General', 'auto_update', 0))
NOTIFY_ON_UPDATE = bool(check_setting_int(CFG, 'General', 'notify_on_update', 1))
FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0))
INDEXER_DEFAULT = check_setting_int(CFG, 'General', 'indexer_default', 0)
INDEXER_TIMEOUT = check_setting_int(CFG, 'General', 'indexer_timeout', 20)
ANIME_DEFAULT = bool(check_setting_int(CFG, 'General', 'anime_default', 0))
SCENE_DEFAULT = bool(check_setting_int(CFG, 'General', 'scene_default', 0))
ARCHIVE_DEFAULT = bool(check_setting_int(CFG, 'General', 'archive_default', 0))
PROVIDER_ORDER = check_setting_str(CFG, 'General', 'provider_order', '').split()
NAMING_PATTERN = check_setting_str(CFG, 'General', 'naming_pattern', 'Season %0S/%SN - S%0SE%0E - %EN')
NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '%SN - %A.D - %EN')
NAMING_CUSTOM_ABD = bool(check_setting_int(CFG, 'General', 'naming_custom_abd', 0))
NAMING_SPORTS_PATTERN = check_setting_str(CFG, 'General', 'naming_sports_pattern', '%SN - %A-D - %EN')
NAMING_ANIME_PATTERN = check_setting_str(CFG, 'General', 'naming_anime_pattern',
'Season %0S/%SN - S%0SE%0E - %EN')
NAMING_ANIME = check_setting_int(CFG, 'General', 'naming_anime', 3)
NAMING_CUSTOM_SPORTS = bool(check_setting_int(CFG, 'General', 'naming_custom_sports', 0))
NAMING_CUSTOM_ANIME = bool(check_setting_int(CFG, 'General', 'naming_custom_anime', 0))
NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1)
NAMING_ANIME_MULTI_EP = check_setting_int(CFG, 'General', 'naming_anime_multi_ep', 1)
NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
NAMING_STRIP_YEAR = bool(check_setting_int(CFG, 'General', 'naming_strip_year', 0))
USE_NZBS = bool(check_setting_int(CFG, 'General', 'use_nzbs', 0))
USE_TORRENTS = bool(check_setting_int(CFG, 'General', 'use_torrents', 1))
NZB_METHOD = check_setting_str(CFG, 'General', 'nzb_method', 'blackhole')
if NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
NZB_METHOD = 'blackhole'
TORRENT_METHOD = check_setting_str(CFG, 'General', 'torrent_method', 'blackhole')
if TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged', 'download_station', 'rtorrent', 'qbittorrent', 'mlnet'):
TORRENT_METHOD = 'blackhole'
DOWNLOAD_PROPERS = bool(check_setting_int(CFG, 'General', 'download_propers', 1))
CHECK_PROPERS_INTERVAL = check_setting_str(CFG, 'General', 'check_propers_interval', '')
if CHECK_PROPERS_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
CHECK_PROPERS_INTERVAL = 'daily'
RANDOMIZE_PROVIDERS = bool(check_setting_int(CFG, 'General', 'randomize_providers', 0))
ALLOW_HIGH_PRIORITY = bool(check_setting_int(CFG, 'General', 'allow_high_priority', 1))
SKIP_REMOVED_FILES = bool(check_setting_int(CFG, 'General', 'skip_removed_files', 0))
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500)
AUTOPOSTPROCESSER_FREQUENCY = check_setting_int(CFG, 'General', 'autopostprocesser_frequency',
DEFAULT_AUTOPOSTPROCESSER_FREQUENCY)
if AUTOPOSTPROCESSER_FREQUENCY < MIN_AUTOPOSTPROCESSER_FREQUENCY:
AUTOPOSTPROCESSER_FREQUENCY = MIN_AUTOPOSTPROCESSER_FREQUENCY
DAILYSEARCH_FREQUENCY = check_setting_int(CFG, 'General', 'dailysearch_frequency',
DEFAULT_DAILYSEARCH_FREQUENCY)
if DAILYSEARCH_FREQUENCY < MIN_DAILYSEARCH_FREQUENCY:
DAILYSEARCH_FREQUENCY = MIN_DAILYSEARCH_FREQUENCY
MIN_BACKLOG_FREQUENCY = get_backlog_cycle_time()
BACKLOG_FREQUENCY = check_setting_int(CFG, 'General', 'backlog_frequency', DEFAULT_BACKLOG_FREQUENCY)
if BACKLOG_FREQUENCY < MIN_BACKLOG_FREQUENCY:
BACKLOG_FREQUENCY = MIN_BACKLOG_FREQUENCY
UPDATE_FREQUENCY = check_setting_int(CFG, 'General', 'update_frequency', DEFAULT_UPDATE_FREQUENCY)
if UPDATE_FREQUENCY < MIN_UPDATE_FREQUENCY:
UPDATE_FREQUENCY = MIN_UPDATE_FREQUENCY
SHOWUPDATE_HOUR = check_setting_int(CFG, 'General', 'showupdate_hour', DEFAULT_SHOWUPDATE_HOUR)
if SHOWUPDATE_HOUR > 23:
SHOWUPDATE_HOUR = 0
elif SHOWUPDATE_HOUR < 0:
SHOWUPDATE_HOUR = 0
BACKLOG_DAYS = check_setting_int(CFG, 'General', 'backlog_days', 7)
NEWS_LAST_READ = check_setting_str(CFG, 'General', 'news_last_read', '1970-01-01')
NEWS_LATEST = NEWS_LAST_READ
NZB_DIR = check_setting_str(CFG, 'Blackhole', 'nzb_dir', '')
TORRENT_DIR = check_setting_str(CFG, 'Blackhole', 'torrent_dir', '')
TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '')
PROCESS_AUTOMATICALLY = bool(check_setting_int(CFG, 'General', 'process_automatically', 0))
NO_DELETE = bool(check_setting_int(CFG, 'General', 'no_delete', 0))
UNPACK = bool(check_setting_int(CFG, 'General', 'unpack', 0))
RENAME_EPISODES = bool(check_setting_int(CFG, 'General', 'rename_episodes', 1))
AIRDATE_EPISODES = bool(check_setting_int(CFG, 'General', 'airdate_episodes', 0))
FILE_TIMESTAMP_TIMEZONE = check_setting_str(CFG, 'General', 'file_timestamp_timezone', 'network')
KEEP_PROCESSED_DIR = bool(check_setting_int(CFG, 'General', 'keep_processed_dir', 1))
PROCESS_METHOD = check_setting_str(CFG, 'General', 'process_method', 'copy' if KEEP_PROCESSED_DIR else 'move')
DELRARCONTENTS = bool(check_setting_int(CFG, 'General', 'del_rar_contents', 0))
MOVE_ASSOCIATED_FILES = bool(check_setting_int(CFG, 'General', 'move_associated_files', 0))
POSTPONE_IF_SYNC_FILES = bool(check_setting_int(CFG, 'General', 'postpone_if_sync_files', 1))
SYNC_FILES = check_setting_str(CFG, 'General', 'sync_files', SYNC_FILES)
NFO_RENAME = bool(check_setting_int(CFG, 'General', 'nfo_rename', 1))
CREATE_MISSING_SHOW_DIRS = bool(check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0))
ADD_SHOWS_WO_DIR = bool(check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0))
NZBS = bool(check_setting_int(CFG, 'NZBs', 'nzbs', 0))
NZBS_UID = check_setting_str(CFG, 'NZBs', 'nzbs_uid', '', censor_log=True)
NZBS_HASH = check_setting_str(CFG, 'NZBs', 'nzbs_hash', '', censor_log=True)
NEWZBIN = bool(check_setting_int(CFG, 'Newzbin', 'newzbin', 0))
NEWZBIN_USERNAME = check_setting_str(CFG, 'Newzbin', 'newzbin_username', '', censor_log=True)
NEWZBIN_PASSWORD = check_setting_str(CFG, 'Newzbin', 'newzbin_password', '', censor_log=True)
SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '', censor_log=True)
SAB_PASSWORD = check_setting_str(CFG, 'SABnzbd', 'sab_password', '', censor_log=True)
SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '', censor_log=True)
SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', 'tv')
SAB_CATEGORY_BACKLOG = check_setting_str(CFG, 'SABnzbd', 'sab_category_backlog', SAB_CATEGORY)
SAB_CATEGORY_ANIME = check_setting_str(CFG, 'SABnzbd', 'sab_category_anime', 'anime')
SAB_CATEGORY_ANIME_BACKLOG = check_setting_str(CFG, 'SABnzbd', 'sab_category_anime_backlog', SAB_CATEGORY_ANIME)
SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
SAB_FORCED = bool(check_setting_int(CFG, 'SABnzbd', 'sab_forced', 0))
NZBGET_USERNAME = check_setting_str(CFG, 'NZBget', 'nzbget_username', 'nzbget', censor_log=True)
NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789', censor_log=True)
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv')
NZBGET_CATEGORY_BACKLOG = check_setting_str(CFG, 'NZBget', 'nzbget_category_backlog', NZBGET_CATEGORY)
NZBGET_CATEGORY_ANIME = check_setting_str(CFG, 'NZBget', 'nzbget_category_anime', 'anime')
NZBGET_CATEGORY_ANIME_BACKLOG = check_setting_str(CFG, 'NZBget', 'nzbget_category_anime_backlog', NZBGET_CATEGORY_ANIME)
NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '')
NZBGET_USE_HTTPS = bool(check_setting_int(CFG, 'NZBget', 'nzbget_use_https', 0))
NZBGET_PRIORITY = check_setting_int(CFG, 'NZBget', 'nzbget_priority', 100)
TORRENT_USERNAME = check_setting_str(CFG, 'TORRENT', 'torrent_username', '', censor_log=True)
TORRENT_PASSWORD = check_setting_str(CFG, 'TORRENT', 'torrent_password', '', censor_log=True)
TORRENT_HOST = check_setting_str(CFG, 'TORRENT', 'torrent_host', '')
TORRENT_PATH = check_setting_str(CFG, 'TORRENT', 'torrent_path', '')
TORRENT_SEED_TIME = check_setting_int(CFG, 'TORRENT', 'torrent_seed_time', 0)
TORRENT_PAUSED = bool(check_setting_int(CFG, 'TORRENT', 'torrent_paused', 0))
TORRENT_HIGH_BANDWIDTH = bool(check_setting_int(CFG, 'TORRENT', 'torrent_high_bandwidth', 0))
TORRENT_LABEL = check_setting_str(CFG, 'TORRENT', 'torrent_label', '')
TORRENT_LABEL_ANIME = check_setting_str(CFG, 'TORRENT', 'torrent_label_anime', '')
TORRENT_VERIFY_CERT = bool(check_setting_int(CFG, 'TORRENT', 'torrent_verify_cert', 0))
TORRENT_RPCURL = check_setting_str(CFG, 'TORRENT', 'torrent_rpcurl', 'transmission')
TORRENT_AUTH_TYPE = check_setting_str(CFG, 'TORRENT', 'torrent_auth_type', '')
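        # notifier settings: each notifier gets an enable flag plus per-event (snatch/download/subtitle) toggles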
USE_KODI = bool(check_setting_int(CFG, 'KODI', 'use_kodi', 0))
KODI_ALWAYS_ON = bool(check_setting_int(CFG, 'KODI', 'kodi_always_on', 1))
KODI_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_onsnatch', 0))
KODI_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_ondownload', 0))
KODI_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_onsubtitledownload', 0))
KODI_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'KODI', 'kodi_update_library', 0))
KODI_UPDATE_FULL = bool(check_setting_int(CFG, 'KODI', 'kodi_update_full', 0))
KODI_UPDATE_ONLYFIRST = bool(check_setting_int(CFG, 'KODI', 'kodi_update_onlyfirst', 0))
KODI_HOST = check_setting_str(CFG, 'KODI', 'kodi_host', '')
KODI_USERNAME = check_setting_str(CFG, 'KODI', 'kodi_username', '', censor_log=True)
KODI_PASSWORD = check_setting_str(CFG, 'KODI', 'kodi_password', '', censor_log=True)
USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0))
PLEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsnatch', 0))
PLEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_ondownload', 0))
PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsubtitledownload', 0))
PLEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Plex', 'plex_update_library', 0))
PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '')
PLEX_SERVER_TOKEN = check_setting_str(CFG, 'Plex', 'plex_server_token', '')
PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '')
PLEX_USERNAME = check_setting_str(CFG, 'Plex', 'plex_username', '', censor_log=True)
PLEX_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_password', '', censor_log=True)
USE_PLEX_CLIENT = bool(check_setting_int(CFG, 'Plex', 'use_plex_client', 0))
PLEX_CLIENT_USERNAME = check_setting_str(CFG, 'Plex', 'plex_client_username', '', censor_log=True)
PLEX_CLIENT_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_client_password', '', censor_log=True)
USE_EMBY = bool(check_setting_int(CFG, 'Emby', 'use_emby', 0))
EMBY_HOST = check_setting_str(CFG, 'Emby', 'emby_host', '')
EMBY_APIKEY = check_setting_str(CFG, 'Emby', 'emby_apikey', '')
USE_GROWL = bool(check_setting_int(CFG, 'Growl', 'use_growl', 0))
GROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsnatch', 0))
GROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_ondownload', 0))
GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsubtitledownload', 0))
GROWL_HOST = check_setting_str(CFG, 'Growl', 'growl_host', '')
GROWL_PASSWORD = check_setting_str(CFG, 'Growl', 'growl_password', '', censor_log=True)
USE_FREEMOBILE = bool(check_setting_int(CFG, 'FreeMobile', 'use_freemobile', 0))
FREEMOBILE_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_onsnatch', 0))
FREEMOBILE_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_ondownload', 0))
FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_onsubtitledownload', 0))
FREEMOBILE_ID = check_setting_str(CFG, 'FreeMobile', 'freemobile_id', '')
FREEMOBILE_APIKEY = check_setting_str(CFG, 'FreeMobile', 'freemobile_apikey', '')
USE_PROWL = bool(check_setting_int(CFG, 'Prowl', 'use_prowl', 0))
PROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsnatch', 0))
PROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_ondownload', 0))
PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsubtitledownload', 0))
PROWL_API = check_setting_str(CFG, 'Prowl', 'prowl_api', '', censor_log=True)
PROWL_PRIORITY = check_setting_str(CFG, 'Prowl', 'prowl_priority', "0")
USE_TWITTER = bool(check_setting_int(CFG, 'Twitter', 'use_twitter', 0))
TWITTER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_onsnatch', 0))
TWITTER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_ondownload', 0))
TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Twitter', 'twitter_notify_onsubtitledownload', 0))
TWITTER_USERNAME = check_setting_str(CFG, 'Twitter', 'twitter_username', '', censor_log=True)
TWITTER_PASSWORD = check_setting_str(CFG, 'Twitter', 'twitter_password', '', censor_log=True)
TWITTER_PREFIX = check_setting_str(CFG, 'Twitter', 'twitter_prefix', GIT_REPO)
TWITTER_DMTO = check_setting_str(CFG, 'Twitter', 'twitter_dmto', '')
TWITTER_USEDM = bool(check_setting_int(CFG, 'Twitter', 'twitter_usedm', 0))
USE_BOXCAR = bool(check_setting_int(CFG, 'Boxcar', 'use_boxcar', 0))
BOXCAR_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar', 'boxcar_notify_onsnatch', 0))
BOXCAR_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar', 'boxcar_notify_ondownload', 0))
BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar', 'boxcar_notify_onsubtitledownload', 0))
BOXCAR_USERNAME = check_setting_str(CFG, 'Boxcar', 'boxcar_username', '', censor_log=True)
USE_BOXCAR2 = bool(check_setting_int(CFG, 'Boxcar2', 'use_boxcar2', 0))
BOXCAR2_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsnatch', 0))
BOXCAR2_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_ondownload', 0))
BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsubtitledownload', 0))
BOXCAR2_ACCESSTOKEN = check_setting_str(CFG, 'Boxcar2', 'boxcar2_accesstoken', '', censor_log=True)
USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0))
PUSHOVER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsnatch', 0))
PUSHOVER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_ondownload', 0))
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsubtitledownload', 0))
PUSHOVER_USERKEY = check_setting_str(CFG, 'Pushover', 'pushover_userkey', '', censor_log=True)
PUSHOVER_APIKEY = check_setting_str(CFG, 'Pushover', 'pushover_apikey', '', censor_log=True)
PUSHOVER_DEVICE = check_setting_str(CFG, 'Pushover', 'pushover_device', '')
PUSHOVER_SOUND = check_setting_str(CFG, 'Pushover', 'pushover_sound', 'pushover')
USE_LIBNOTIFY = bool(check_setting_int(CFG, 'Libnotify', 'use_libnotify', 0))
LIBNOTIFY_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsnatch', 0))
LIBNOTIFY_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_ondownload', 0))
LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsubtitledownload', 0))
USE_NMJ = bool(check_setting_int(CFG, 'NMJ', 'use_nmj', 0))
NMJ_HOST = check_setting_str(CFG, 'NMJ', 'nmj_host', '')
NMJ_DATABASE = check_setting_str(CFG, 'NMJ', 'nmj_database', '')
NMJ_MOUNT = check_setting_str(CFG, 'NMJ', 'nmj_mount', '')
USE_NMJv2 = bool(check_setting_int(CFG, 'NMJv2', 'use_nmjv2', 0))
NMJv2_HOST = check_setting_str(CFG, 'NMJv2', 'nmjv2_host', '')
NMJv2_DATABASE = check_setting_str(CFG, 'NMJv2', 'nmjv2_database', '')
NMJv2_DBLOC = check_setting_str(CFG, 'NMJv2', 'nmjv2_dbloc', '')
USE_SYNOINDEX = bool(check_setting_int(CFG, 'Synology', 'use_synoindex', 0))
USE_SYNOLOGYNOTIFIER = bool(check_setting_int(CFG, 'SynologyNotifier', 'use_synologynotifier', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_onsnatch', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_ondownload', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_onsubtitledownload', 0))
USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0))
TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '', censor_log=True)
TRAKT_ACCESS_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_access_token', '', censor_log=True)
TRAKT_REFRESH_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_refresh_token', '', censor_log=True)
TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
TRAKT_REMOVE_SERIESLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_serieslist', 0))
TRAKT_REMOVE_SHOW_FROM_SICKRAGE = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_show_from_sickrage', 0))
TRAKT_SYNC_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync_watchlist', 0))
TRAKT_METHOD_ADD = check_setting_int(CFG, 'Trakt', 'trakt_method_add', 0)
TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
TRAKT_USE_RECOMMENDED = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_recommended', 0))
TRAKT_SYNC = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync', 0))
TRAKT_SYNC_REMOVE = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync_remove', 0))
TRAKT_DEFAULT_INDEXER = check_setting_int(CFG, 'Trakt', 'trakt_default_indexer', 1)
TRAKT_TIMEOUT = check_setting_int(CFG, 'Trakt', 'trakt_timeout', 30)
TRAKT_BLACKLIST_NAME = check_setting_str(CFG, 'Trakt', 'trakt_blacklist_name', '')
USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
PYTIVO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsnatch', 0))
PYTIVO_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_ondownload', 0))
PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsubtitledownload', 0))
PYTIVO_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'pyTivo', 'pyTivo_update_library', 0))
PYTIVO_HOST = check_setting_str(CFG, 'pyTivo', 'pytivo_host', '')
PYTIVO_SHARE_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_share_name', '')
PYTIVO_TIVO_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_tivo_name', '')
USE_NMA = bool(check_setting_int(CFG, 'NMA', 'use_nma', 0))
NMA_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsnatch', 0))
NMA_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_ondownload', 0))
NMA_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsubtitledownload', 0))
NMA_API = check_setting_str(CFG, 'NMA', 'nma_api', '', censor_log=True)
NMA_PRIORITY = check_setting_str(CFG, 'NMA', 'nma_priority', "0")
USE_PUSHALOT = bool(check_setting_int(CFG, 'Pushalot', 'use_pushalot', 0))
PUSHALOT_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_onsnatch', 0))
PUSHALOT_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_ondownload', 0))
PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Pushalot', 'pushalot_notify_onsubtitledownload', 0))
PUSHALOT_AUTHORIZATIONTOKEN = check_setting_str(CFG, 'Pushalot', 'pushalot_authorizationtoken', '', censor_log=True)
USE_PUSHBULLET = bool(check_setting_int(CFG, 'Pushbullet', 'use_pushbullet', 0))
PUSHBULLET_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_onsnatch', 0))
PUSHBULLET_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_ondownload', 0))
PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_onsubtitledownload', 0))
PUSHBULLET_API = check_setting_str(CFG, 'Pushbullet', 'pushbullet_api', '', censor_log=True)
PUSHBULLET_DEVICE = check_setting_str(CFG, 'Pushbullet', 'pushbullet_device', '')
USE_EMAIL = bool(check_setting_int(CFG, 'Email', 'use_email', 0))
EMAIL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Email', 'email_notify_onsnatch', 0))
EMAIL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Email', 'email_notify_ondownload', 0))
EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Email', 'email_notify_onsubtitledownload', 0))
EMAIL_HOST = check_setting_str(CFG, 'Email', 'email_host', '')
EMAIL_PORT = check_setting_int(CFG, 'Email', 'email_port', 25)
EMAIL_TLS = bool(check_setting_int(CFG, 'Email', 'email_tls', 0))
EMAIL_USER = check_setting_str(CFG, 'Email', 'email_user', '', censor_log=True)
EMAIL_PASSWORD = check_setting_str(CFG, 'Email', 'email_password', '', censor_log=True)
EMAIL_FROM = check_setting_str(CFG, 'Email', 'email_from', '')
EMAIL_LIST = check_setting_str(CFG, 'Email', 'email_list', '')
USE_SUBTITLES = bool(check_setting_int(CFG, 'Subtitles', 'use_subtitles', 0))
SUBTITLES_LANGUAGES = check_setting_str(CFG, 'Subtitles', 'subtitles_languages', '').split(',')
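        # ''.split(',') yields [''], so normalize an empty setting to an empty list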
if SUBTITLES_LANGUAGES[0] == '':
SUBTITLES_LANGUAGES = []
SUBTITLES_DIR = check_setting_str(CFG, 'Subtitles', 'subtitles_dir', '')
SUBTITLES_SERVICES_LIST = check_setting_str(CFG, 'Subtitles', 'SUBTITLES_SERVICES_LIST', '').split(',')
SUBTITLES_SERVICES_ENABLED = [int(x) for x in
check_setting_str(CFG, 'Subtitles', 'SUBTITLES_SERVICES_ENABLED', '').split('|')
if x]
SUBTITLES_DEFAULT = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_default', 0))
SUBTITLES_HISTORY = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_history', 0))
EMBEDDED_SUBTITLES_ALL = bool(check_setting_int(CFG, 'Subtitles', 'embedded_subtitles_all', 0))
SUBTITLES_HEARING_IMPAIRED = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_hearing_impaired', 0))
SUBTITLES_FINDER_FREQUENCY = check_setting_int(CFG, 'Subtitles', 'subtitles_finder_frequency', 1)
SUBTITLES_MULTI = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_multi', 1))
SUBTITLES_EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'Subtitles', 'subtitles_extra_scripts', '').split('|') if x.strip()]
ADDIC7ED_USER = check_setting_str(CFG, 'Subtitles', 'addic7ed_username', '', censor_log=True)
ADDIC7ED_PASS = check_setting_str(CFG, 'Subtitles', 'addic7ed_password', '', censor_log=True)
LEGENDASTV_USER = check_setting_str(CFG, 'Subtitles', 'legendastv_username', '', censor_log=True)
LEGENDASTV_PASS = check_setting_str(CFG, 'Subtitles', 'legendastv_password', '', censor_log=True)
OPENSUBTITLES_USER = check_setting_str(CFG, 'Subtitles', 'opensubtitles_username', '', censor_log=True)
OPENSUBTITLES_PASS = check_setting_str(CFG, 'Subtitles', 'opensubtitles_password', '', censor_log=True)
USE_FAILED_DOWNLOADS = bool(check_setting_int(CFG, 'FailedDownloads', 'use_failed_downloads', 0))
DELETE_FAILED = bool(check_setting_int(CFG, 'FailedDownloads', 'delete_failed', 0))
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
IGNORE_WORDS = check_setting_str(CFG, 'General', 'ignore_words', IGNORE_WORDS)
REQUIRE_WORDS = check_setting_str(CFG, 'General', 'require_words', REQUIRE_WORDS)
IGNORED_SUBS_LIST = check_setting_str(CFG, 'General', 'ignored_subs_list', IGNORED_SUBS_LIST)
CALENDAR_UNPROTECTED = bool(check_setting_int(CFG, 'General', 'calendar_unprotected', 0))
CALENDAR_ICONS = bool(check_setting_int(CFG, 'General', 'calendar_icons', 0))
NO_RESTART = bool(check_setting_int(CFG, 'General', 'no_restart', 0))
EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if
x.strip()]
USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0))
ANIMESUPPORT = False
USE_ANIDB = bool(check_setting_int(CFG, 'ANIDB', 'use_anidb', 0))
ANIDB_USERNAME = check_setting_str(CFG, 'ANIDB', 'anidb_username', '', censor_log=True)
ANIDB_PASSWORD = check_setting_str(CFG, 'ANIDB', 'anidb_password', '', censor_log=True)
ANIDB_USE_MYLIST = bool(check_setting_int(CFG, 'ANIDB', 'anidb_use_mylist', 0))
ANIME_SPLIT_HOME = bool(check_setting_int(CFG, 'ANIME', 'anime_split_home', 0))
METADATA_KODI = check_setting_str(CFG, 'General', 'metadata_kodi', '0|0|0|0|0|0|0|0|0|0')
METADATA_KODI_12PLUS = check_setting_str(CFG, 'General', 'metadata_kodi_12plus', '0|0|0|0|0|0|0|0|0|0')
METADATA_MEDIABROWSER = check_setting_str(CFG, 'General', 'metadata_mediabrowser', '0|0|0|0|0|0|0|0|0|0')
METADATA_PS3 = check_setting_str(CFG, 'General', 'metadata_ps3', '0|0|0|0|0|0|0|0|0|0')
METADATA_WDTV = check_setting_str(CFG, 'General', 'metadata_wdtv', '0|0|0|0|0|0|0|0|0|0')
METADATA_TIVO = check_setting_str(CFG, 'General', 'metadata_tivo', '0|0|0|0|0|0|0|0|0|0')
METADATA_MEDE8ER = check_setting_str(CFG, 'General', 'metadata_mede8er', '0|0|0|0|0|0|0|0|0|0')
HOME_LAYOUT = check_setting_str(CFG, 'GUI', 'home_layout', 'poster')
HISTORY_LAYOUT = check_setting_str(CFG, 'GUI', 'history_layout', 'detailed')
HISTORY_LIMIT = check_setting_str(CFG, 'GUI', 'history_limit', '100')
DISPLAY_SHOW_SPECIALS = bool(check_setting_int(CFG, 'GUI', 'display_show_specials', 1))
COMING_EPS_LAYOUT = check_setting_str(CFG, 'GUI', 'coming_eps_layout', 'banner')
COMING_EPS_DISPLAY_PAUSED = bool(check_setting_int(CFG, 'GUI', 'coming_eps_display_paused', 0))
COMING_EPS_SORT = check_setting_str(CFG, 'GUI', 'coming_eps_sort', 'date')
COMING_EPS_MISSED_RANGE = check_setting_int(CFG, 'GUI', 'coming_eps_missed_range', 7)
FUZZY_DATING = bool(check_setting_int(CFG, 'GUI', 'fuzzy_dating', 0))
TRIM_ZERO = bool(check_setting_int(CFG, 'GUI', 'trim_zero', 0))
DATE_PRESET = check_setting_str(CFG, 'GUI', 'date_preset', '%x')
TIME_PRESET_W_SECONDS = check_setting_str(CFG, 'GUI', 'time_preset', '%I:%M:%S %p')
TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u":%S", u"")
TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'local')
POSTER_SORTBY = check_setting_str(CFG, 'GUI', 'poster_sortby', 'name')
POSTER_SORTDIR = check_setting_int(CFG, 'GUI', 'poster_sortdir', 1)
FILTER_ROW = bool(check_setting_int(CFG, 'GUI', 'filter_row', 1))
DISPLAY_ALL_SEASONS = bool(check_setting_int(CFG, 'General', 'display_all_seasons', 1))
# initialize NZB and TORRENT providers
providerList = providers.makeProviderList()
NEWZNAB_DATA = check_setting_str(CFG, 'Newznab', 'newznab_data', '')
newznabProviderList = providers.getNewznabProviderList(NEWZNAB_DATA)
TORRENTRSS_DATA = check_setting_str(CFG, 'TorrentRss', 'torrentrss_data', '')
torrentRssProviderList = providers.getTorrentRssProviderList(TORRENTRSS_DATA)
# dynamically load provider settings
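        # each provider keeps its options in a config section named after its uppercased provider ID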
for curTorrentProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.TORRENT]:
curTorrentProvider.enabled = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID(), 0))
if hasattr(curTorrentProvider, 'api_key'):
curTorrentProvider.api_key = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_api_key', '', censor_log=True)
if hasattr(curTorrentProvider, 'hash'):
curTorrentProvider.hash = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_hash', '', censor_log=True)
if hasattr(curTorrentProvider, 'digest'):
curTorrentProvider.digest = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_digest', '', censor_log=True)
if hasattr(curTorrentProvider, 'username'):
curTorrentProvider.username = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_username', '', censor_log=True)
if hasattr(curTorrentProvider, 'password'):
curTorrentProvider.password = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_password', '', censor_log=True)
if hasattr(curTorrentProvider, 'passkey'):
curTorrentProvider.passkey = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_passkey', '', censor_log=True)
if hasattr(curTorrentProvider, 'pin'):
curTorrentProvider.pin = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_pin', '', censor_log=True)
if hasattr(curTorrentProvider, 'confirmed'):
curTorrentProvider.confirmed = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_confirmed', 1))
if hasattr(curTorrentProvider, 'ranked'):
curTorrentProvider.ranked = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_ranked', 1))
if hasattr(curTorrentProvider, 'engrelease'):
curTorrentProvider.engrelease = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_engrelease', 0))
if hasattr(curTorrentProvider, 'onlyspasearch'):
curTorrentProvider.onlyspasearch = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_onlyspasearch', 0))
if hasattr(curTorrentProvider, 'sorting'):
curTorrentProvider.sorting = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_sorting', 'seeders')
if hasattr(curTorrentProvider, 'options'):
curTorrentProvider.options = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_options', '')
if hasattr(curTorrentProvider, 'ratio'):
curTorrentProvider.ratio = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_ratio', '')
if hasattr(curTorrentProvider, 'minseed'):
curTorrentProvider.minseed = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_minseed', 1)
if hasattr(curTorrentProvider, 'minleech'):
curTorrentProvider.minleech = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_minleech', 0)
if hasattr(curTorrentProvider, 'freeleech'):
curTorrentProvider.freeleech = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_freeleech', 0))
if hasattr(curTorrentProvider, 'search_mode'):
curTorrentProvider.search_mode = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_search_mode',
'eponly')
if hasattr(curTorrentProvider, 'search_fallback'):
curTorrentProvider.search_fallback = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_search_fallback',
0))
if hasattr(curTorrentProvider, 'enable_daily'):
curTorrentProvider.enable_daily = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_enable_daily',
1))
if hasattr(curTorrentProvider, 'enable_backlog'):
curTorrentProvider.enable_backlog = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_enable_backlog',
curTorrentProvider.supportsBacklog))
if hasattr(curTorrentProvider, 'cat'):
curTorrentProvider.cat = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_cat', 0)
if hasattr(curTorrentProvider, 'subtitle'):
curTorrentProvider.subtitle = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_subtitle', 0))
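        # same pattern for NZB providers, which expose a smaller set of per-provider options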
for curNzbProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.NZB]:
curNzbProvider.enabled = bool(
check_setting_int(CFG, curNzbProvider.getID().upper(), curNzbProvider.getID(), 0))
if hasattr(curNzbProvider, 'api_key'):
curNzbProvider.api_key = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_api_key', '', censor_log=True)
if hasattr(curNzbProvider, 'username'):
curNzbProvider.username = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_username', '', censor_log=True)
if hasattr(curNzbProvider, 'search_mode'):
curNzbProvider.search_mode = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_search_mode',
'eponly')
if hasattr(curNzbProvider, 'search_fallback'):
curNzbProvider.search_fallback = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_search_fallback',
0))
if hasattr(curNzbProvider, 'enable_daily'):
curNzbProvider.enable_daily = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_enable_daily',
1))
if hasattr(curNzbProvider, 'enable_backlog'):
curNzbProvider.enable_backlog = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_enable_backlog',
curNzbProvider.supportsBacklog))
if not os.path.isfile(CONFIG_FILE):

logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
save_config()
# initialize the main SB database
myDB = db.DBConnection()
db.upgradeDatabase(myDB, mainDB.InitialSchema)
# initialize the cache database
myDB = db.DBConnection('cache.db')
db.upgradeDatabase(myDB, cache_db.InitialSchema)
# initialize the failed downloads database
myDB = db.DBConnection('failed.db')
db.upgradeDatabase(myDB, failed_db.InitialSchema)
        # run sanity checks and fix up any problems in the main database
myDB = db.DBConnection()
db.sanityCheckDatabase(myDB, mainDB.MainSanityCheck)
# migrate the config if it needs it
migrator = ConfigMigrator(CFG)
migrator.migrate_config()
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()
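        # each METADATA_* value is a '|'-delimited flag string (see the '0|0|...' defaults) handed to set_config()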
for cur_metadata_tuple in [(METADATA_KODI, metadata.kodi),
(METADATA_KODI_12PLUS, metadata.kodi_12plus),
(METADATA_MEDIABROWSER, metadata.mediabrowser),
(METADATA_PS3, metadata.ps3),
(METADATA_WDTV, metadata.wdtv),
(METADATA_TIVO, metadata.tivo),
(METADATA_MEDE8ER, metadata.mede8er)]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers
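        # each Scheduler wraps an action object and runs it on a named thread every cycleTime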
# updaters
versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
cycleTime=datetime.timedelta(hours=UPDATE_FREQUENCY),
threadName="CHECKVERSION",
silent=False)
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE")
showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(),
cycleTime=datetime.timedelta(hours=1),
threadName="SHOWUPDATER",
start_time=datetime.time(hour=SHOWUPDATE_HOUR))
# searchers
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SEARCHQUEUE")
# TODO: update_interval should take last daily/backlog times into account!
update_interval = datetime.timedelta(minutes=DAILYSEARCH_FREQUENCY)
dailySearchScheduler = scheduler.Scheduler(dailysearcher.DailySearcher(),
cycleTime=update_interval,
threadName="DAILYSEARCHER",
run_delay=update_interval)
update_interval = datetime.timedelta(minutes=BACKLOG_FREQUENCY)
backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(),
cycleTime=update_interval,
threadName="BACKLOG",
run_delay=update_interval)
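        # map the user preset to a minutes-based interval; unknown presets fall back to an hourly cycle with a 1 AM start time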
search_intervals = {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}
if CHECK_PROPERS_INTERVAL in search_intervals:
update_interval = datetime.timedelta(minutes=search_intervals[CHECK_PROPERS_INTERVAL])
run_at = None
else:
update_interval = datetime.timedelta(hours=1)
run_at = datetime.time(hour=1) # 1 AM
properFinderScheduler = scheduler.Scheduler(properFinder.ProperFinder(),
cycleTime=update_interval,
threadName="FINDPROPERS",
start_time=run_at,
run_delay=update_interval)
# processors
autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(),
cycleTime=datetime.timedelta(
minutes=AUTOPOSTPROCESSER_FREQUENCY),
threadName="POSTPROCESSER",
silent=not PROCESS_AUTOMATICALLY)
traktCheckerScheduler = scheduler.Scheduler(traktChecker.TraktChecker(),
cycleTime=datetime.timedelta(hours=1),
threadName="TRAKTCHECKER",
silent=not USE_TRAKT)
subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
threadName="FINDSUBTITLES",
silent=not USE_SUBTITLES)
showList = []
loadingShowList = {}
__INITIALIZED__ = True
return True
def start():
global started
with INIT_LOCK:
if __INITIALIZED__:
            # start the system events queue
events.start()
# start the daily search scheduler
dailySearchScheduler.enable = True
dailySearchScheduler.start()
# start the backlog scheduler
backlogSearchScheduler.enable = True
backlogSearchScheduler.start()
# start the show updater
showUpdateScheduler.enable = True
showUpdateScheduler.start()
# start the version checker
versionCheckScheduler.enable = True
versionCheckScheduler.start()
# start the queue checker
showQueueScheduler.enable = True
showQueueScheduler.start()
# start the search queue checker
searchQueueScheduler.enable = True
searchQueueScheduler.start()
# start the proper finder
if DOWNLOAD_PROPERS:
properFinderScheduler.silent = False
properFinderScheduler.enable = True
else:
properFinderScheduler.enable = False
properFinderScheduler.silent = True
properFinderScheduler.start()
# start the post processor
if PROCESS_AUTOMATICALLY:
autoPostProcesserScheduler.silent = False
autoPostProcesserScheduler.enable = True
else:
autoPostProcesserScheduler.enable = False
autoPostProcesserScheduler.silent = True
autoPostProcesserScheduler.start()
# start the subtitles finder
if USE_SUBTITLES:
subtitlesFinderScheduler.silent = False
subtitlesFinderScheduler.enable = True
else:
subtitlesFinderScheduler.enable = False
subtitlesFinderScheduler.silent = True
subtitlesFinderScheduler.start()
# start the trakt checker
if USE_TRAKT:
traktCheckerScheduler.silent = False
traktCheckerScheduler.enable = True
else:
traktCheckerScheduler.enable = False
traktCheckerScheduler.silent = True
traktCheckerScheduler.start()
started = True
def halt():
global __INITIALIZED__, started
with INIT_LOCK:
if __INITIALIZED__:
logger.log(u"Aborting all threads")
events.stop.set()
logger.log(u"Waiting for the EVENTS thread to exit")
try:
events.join(10)
except Exception:
pass
dailySearchScheduler.stop.set()
logger.log(u"Waiting for the DAILYSEARCH thread to exit")
try:
dailySearchScheduler.join(10)
except Exception:
pass
backlogSearchScheduler.stop.set()
logger.log(u"Waiting for the BACKLOG thread to exit")
try:
backlogSearchScheduler.join(10)
except Exception:
pass
showUpdateScheduler.stop.set()
logger.log(u"Waiting for the SHOWUPDATER thread to exit")
try:
showUpdateScheduler.join(10)
except Exception:
pass
versionCheckScheduler.stop.set()
logger.log(u"Waiting for the VERSIONCHECKER thread to exit")
try:
versionCheckScheduler.join(10)
except Exception:
pass
showQueueScheduler.stop.set()
logger.log(u"Waiting for the SHOWQUEUE thread to exit")
try:
showQueueScheduler.join(10)
except Exception:
pass
searchQueueScheduler.stop.set()
logger.log(u"Waiting for the SEARCHQUEUE thread to exit")
try:
searchQueueScheduler.join(10)
except Exception:
pass
autoPostProcesserScheduler.stop.set()
logger.log(u"Waiting for the POSTPROCESSER thread to exit")
try:
autoPostProcesserScheduler.join(10)
except Exception:
pass
traktCheckerScheduler.stop.set()
logger.log(u"Waiting for the TRAKTCHECKER thread to exit")
try:
traktCheckerScheduler.join(10)
except Exception:
pass
properFinderScheduler.stop.set()
logger.log(u"Waiting for the PROPERFINDER thread to exit")
try:
properFinderScheduler.join(10)
except Exception:
pass
subtitlesFinderScheduler.stop.set()
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
subtitlesFinderScheduler.join(10)
except Exception:
pass
if ADBA_CONNECTION:
ADBA_CONNECTION.logout()
logger.log(u"Waiting for the ANIDB CONNECTION thread to exit")
try:
ADBA_CONNECTION.join(10)
except Exception:
pass
__INITIALIZED__ = False
started = False
def sig_handler(signum=None, frame=None):
    if signum is not None:
logger.log(u"Signal %i caught, saving and exiting..." % int(signum))
Shutdown.stop(PID)
def saveAll():
# write all shows
logger.log(u"Saving all shows to the database")
for show in showList:
show.saveToDB()
# save config
logger.log(u"Saving config file to disk")
save_config()
def restart(soft=True):
if soft:
halt()
saveAll()
logger.log(u"Re-initializing all data")
initialize()
else:
events.put(events.SystemEvent.RESTART)
def save_config():
new_config = ConfigObj()
new_config.filename = CONFIG_FILE
    # For passwords, include the word `password` in the item name and wrap the value with helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION) here in save_config()
new_config['General'] = {}
new_config['General']['git_autoissues'] = int(GIT_AUTOISSUES)
new_config['General']['git_username'] = GIT_USERNAME
new_config['General']['git_password'] = helpers.encrypt(GIT_PASSWORD, ENCRYPTION_VERSION)
new_config['General']['git_reset'] = int(GIT_RESET)
new_config['General']['branch'] = BRANCH
new_config['General']['git_remote'] = GIT_REMOTE
new_config['General']['git_remote_url'] = GIT_REMOTE_URL
new_config['General']['cur_commit_hash'] = CUR_COMMIT_HASH
new_config['General']['cur_commit_branch'] = CUR_COMMIT_BRANCH
new_config['General']['git_newver'] = int(GIT_NEWVER)
new_config['General']['config_version'] = CONFIG_VERSION
new_config['General']['encryption_version'] = int(ENCRYPTION_VERSION)
new_config['General']['encryption_secret'] = ENCRYPTION_SECRET
new_config['General']['log_dir'] = ACTUAL_LOG_DIR if ACTUAL_LOG_DIR else 'Logs'
new_config['General']['log_nr'] = int(LOG_NR)
new_config['General']['log_size'] = int(LOG_SIZE)
new_config['General']['socket_timeout'] = SOCKET_TIMEOUT
new_config['General']['web_port'] = WEB_PORT
new_config['General']['web_host'] = WEB_HOST
new_config['General']['web_ipv6'] = int(WEB_IPV6)
new_config['General']['web_log'] = int(WEB_LOG)
new_config['General']['web_root'] = WEB_ROOT
new_config['General']['web_username'] = WEB_USERNAME
new_config['General']['web_password'] = helpers.encrypt(WEB_PASSWORD, ENCRYPTION_VERSION)
new_config['General']['web_cookie_secret'] = WEB_COOKIE_SECRET
new_config['General']['web_use_gzip'] = int(WEB_USE_GZIP)
new_config['General']['ssl_verify'] = int(SSL_VERIFY)
new_config['General']['download_url'] = DOWNLOAD_URL
new_config['General']['localhost_ip'] = LOCALHOST_IP
new_config['General']['cpu_preset'] = CPU_PRESET
new_config['General']['anon_redirect'] = ANON_REDIRECT
new_config['General']['api_key'] = API_KEY
new_config['General']['debug'] = int(DEBUG)
new_config['General']['default_page'] = DEFAULT_PAGE
new_config['General']['enable_https'] = int(ENABLE_HTTPS)
new_config['General']['https_cert'] = HTTPS_CERT
new_config['General']['https_key'] = HTTPS_KEY
new_config['General']['handle_reverse_proxy'] = int(HANDLE_REVERSE_PROXY)
new_config['General']['use_nzbs'] = int(USE_NZBS)
new_config['General']['use_torrents'] = int(USE_TORRENTS)
new_config['General']['nzb_method'] = NZB_METHOD
new_config['General']['torrent_method'] = TORRENT_METHOD
new_config['General']['usenet_retention'] = int(USENET_RETENTION)
new_config['General']['autopostprocesser_frequency'] = int(AUTOPOSTPROCESSER_FREQUENCY)
new_config['General']['dailysearch_frequency'] = int(DAILYSEARCH_FREQUENCY)
new_config['General']['backlog_frequency'] = int(BACKLOG_FREQUENCY)
new_config['General']['update_frequency'] = int(UPDATE_FREQUENCY)
new_config['General']['showupdate_hour'] = int(SHOWUPDATE_HOUR)
new_config['General']['download_propers'] = int(DOWNLOAD_PROPERS)
new_config['General']['randomize_providers'] = int(RANDOMIZE_PROVIDERS)
new_config['General']['check_propers_interval'] = CHECK_PROPERS_INTERVAL
new_config['General']['allow_high_priority'] = int(ALLOW_HIGH_PRIORITY)
new_config['General']['skip_removed_files'] = int(SKIP_REMOVED_FILES)
new_config['General']['quality_default'] = int(QUALITY_DEFAULT)
new_config['General']['status_default'] = int(STATUS_DEFAULT)
new_config['General']['status_default_after'] = int(STATUS_DEFAULT_AFTER)
new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT)
new_config['General']['indexer_default'] = int(INDEXER_DEFAULT)
new_config['General']['indexer_timeout'] = int(INDEXER_TIMEOUT)
new_config['General']['anime_default'] = int(ANIME_DEFAULT)
new_config['General']['scene_default'] = int(SCENE_DEFAULT)
new_config['General']['archive_default'] = int(ARCHIVE_DEFAULT)
new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER)
new_config['General']['version_notify'] = int(VERSION_NOTIFY)
new_config['General']['auto_update'] = int(AUTO_UPDATE)
new_config['General']['notify_on_update'] = int(NOTIFY_ON_UPDATE)
new_config['General']['naming_strip_year'] = int(NAMING_STRIP_YEAR)
new_config['General']['naming_pattern'] = NAMING_PATTERN
new_config['General']['naming_custom_abd'] = int(NAMING_CUSTOM_ABD)
new_config['General']['naming_abd_pattern'] = NAMING_ABD_PATTERN
new_config['General']['naming_custom_sports'] = int(NAMING_CUSTOM_SPORTS)
new_config['General']['naming_sports_pattern'] = NAMING_SPORTS_PATTERN
new_config['General']['naming_custom_anime'] = int(NAMING_CUSTOM_ANIME)
new_config['General']['naming_anime_pattern'] = NAMING_ANIME_PATTERN
new_config['General']['naming_multi_ep'] = int(NAMING_MULTI_EP)
new_config['General']['naming_anime_multi_ep'] = int(NAMING_ANIME_MULTI_EP)
new_config['General']['naming_anime'] = int(NAMING_ANIME)
new_config['General']['indexerDefaultLang'] = INDEXER_DEFAULT_LANGUAGE
new_config['General']['ep_default_deleted_status'] = int(EP_DEFAULT_DELETED_STATUS)
new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
new_config['General']['trash_remove_show'] = int(TRASH_REMOVE_SHOW)
new_config['General']['trash_rotate_logs'] = int(TRASH_ROTATE_LOGS)
new_config['General']['sort_article'] = int(SORT_ARTICLE)
new_config['General']['proxy_setting'] = PROXY_SETTING
new_config['General']['proxy_indexers'] = int(PROXY_INDEXERS)
new_config['General']['use_listview'] = int(USE_LISTVIEW)
new_config['General']['metadata_kodi'] = METADATA_KODI
new_config['General']['metadata_kodi_12plus'] = METADATA_KODI_12PLUS
new_config['General']['metadata_mediabrowser'] = METADATA_MEDIABROWSER
new_config['General']['metadata_ps3'] = METADATA_PS3
new_config['General']['metadata_wdtv'] = METADATA_WDTV
new_config['General']['metadata_tivo'] = METADATA_TIVO
new_config['General']['metadata_mede8er'] = METADATA_MEDE8ER
new_config['General']['backlog_days'] = int(BACKLOG_DAYS)
new_config['General']['cache_dir'] = ACTUAL_CACHE_DIR if ACTUAL_CACHE_DIR else 'cache'
new_config['General']['root_dirs'] = ROOT_DIRS if ROOT_DIRS else ''
new_config['General']['tv_download_dir'] = TV_DOWNLOAD_DIR
new_config['General']['keep_processed_dir'] = int(KEEP_PROCESSED_DIR)
new_config['General']['process_method'] = PROCESS_METHOD
new_config['General']['del_rar_contents'] = int(DELRARCONTENTS)
new_config['General']['move_associated_files'] = int(MOVE_ASSOCIATED_FILES)
new_config['General']['sync_files'] = SYNC_FILES
new_config['General']['postpone_if_sync_files'] = int(POSTPONE_IF_SYNC_FILES)
new_config['General']['nfo_rename'] = int(NFO_RENAME)
new_config['General']['process_automatically'] = int(PROCESS_AUTOMATICALLY)
new_config['General']['no_delete'] = int(NO_DELETE)
new_config['General']['unpack'] = int(UNPACK)
new_config['General']['rename_episodes'] = int(RENAME_EPISODES)
new_config['General']['airdate_episodes'] = int(AIRDATE_EPISODES)
new_config['General']['file_timestamp_timezone'] = FILE_TIMESTAMP_TIMEZONE
new_config['General']['create_missing_show_dirs'] = int(CREATE_MISSING_SHOW_DIRS)
new_config['General']['add_shows_wo_dir'] = int(ADD_SHOWS_WO_DIR)
new_config['General']['extra_scripts'] = '|'.join(EXTRA_SCRIPTS)
new_config['General']['git_path'] = GIT_PATH
new_config['General']['ignore_words'] = IGNORE_WORDS
new_config['General']['require_words'] = REQUIRE_WORDS
new_config['General']['ignored_subs_list'] = IGNORED_SUBS_LIST
new_config['General']['calendar_unprotected'] = int(CALENDAR_UNPROTECTED)
new_config['General']['calendar_icons'] = int(CALENDAR_ICONS)
new_config['General']['no_restart'] = int(NO_RESTART)
new_config['General']['developer'] = int(DEVELOPER)
new_config['General']['display_all_seasons'] = int(DISPLAY_ALL_SEASONS)
new_config['General']['news_last_read'] = NEWS_LAST_READ
new_config['Blackhole'] = {}
new_config['Blackhole']['nzb_dir'] = NZB_DIR
new_config['Blackhole']['torrent_dir'] = TORRENT_DIR
# dynamically save provider settings
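    # mirror of the dynamic load in initialize(): only attributes a provider actually defines are written back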
for curTorrentProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.TORRENT]:
new_config[curTorrentProvider.getID().upper()] = {}
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID()] = int(curTorrentProvider.enabled)
if hasattr(curTorrentProvider, 'digest'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_digest'] = curTorrentProvider.digest
if hasattr(curTorrentProvider, 'hash'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_hash'] = curTorrentProvider.hash
if hasattr(curTorrentProvider, 'api_key'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_api_key'] = curTorrentProvider.api_key
if hasattr(curTorrentProvider, 'username'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_username'] = curTorrentProvider.username
if hasattr(curTorrentProvider, 'password'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_password'] = helpers.encrypt(
curTorrentProvider.password, ENCRYPTION_VERSION)
if hasattr(curTorrentProvider, 'passkey'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_passkey'] = curTorrentProvider.passkey
if hasattr(curTorrentProvider, 'pin'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_pin'] = curTorrentProvider.pin
if hasattr(curTorrentProvider, 'confirmed'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_confirmed'] = int(
curTorrentProvider.confirmed)
if hasattr(curTorrentProvider, 'ranked'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_ranked'] = int(
curTorrentProvider.ranked)
if hasattr(curTorrentProvider, 'engrelease'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_engrelease'] = int(
curTorrentProvider.engrelease)
if hasattr(curTorrentProvider, 'onlyspasearch'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_onlyspasearch'] = int(
curTorrentProvider.onlyspasearch)
if hasattr(curTorrentProvider, 'sorting'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_sorting'] = curTorrentProvider.sorting
if hasattr(curTorrentProvider, 'ratio'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_ratio'] = curTorrentProvider.ratio
if hasattr(curTorrentProvider, 'minseed'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_minseed'] = int(
curTorrentProvider.minseed)
if hasattr(curTorrentProvider, 'minleech'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_minleech'] = int(
curTorrentProvider.minleech)
if hasattr(curTorrentProvider, 'options'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_options'] = curTorrentProvider.options
if hasattr(curTorrentProvider, 'freeleech'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_freeleech'] = int(
curTorrentProvider.freeleech)
if hasattr(curTorrentProvider, 'search_mode'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_search_mode'] = curTorrentProvider.search_mode
if hasattr(curTorrentProvider, 'search_fallback'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_search_fallback'] = int(
curTorrentProvider.search_fallback)
if hasattr(curTorrentProvider, 'enable_daily'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_enable_daily'] = int(
curTorrentProvider.enable_daily)
if hasattr(curTorrentProvider, 'enable_backlog'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_enable_backlog'] = int(
curTorrentProvider.enable_backlog)
if hasattr(curTorrentProvider, 'cat'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_cat'] = int(
curTorrentProvider.cat)
if hasattr(curTorrentProvider, 'subtitle'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_subtitle'] = int(
curTorrentProvider.subtitle)
for curNzbProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.NZB]:
new_config[curNzbProvider.getID().upper()] = {}
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID()] = int(curNzbProvider.enabled)
if hasattr(curNzbProvider, 'api_key'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_api_key'] = curNzbProvider.api_key
if hasattr(curNzbProvider, 'username'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_username'] = curNzbProvider.username
if hasattr(curNzbProvider, 'search_mode'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_search_mode'] = curNzbProvider.search_mode
if hasattr(curNzbProvider, 'search_fallback'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_search_fallback'] = int(
curNzbProvider.search_fallback)
if hasattr(curNzbProvider, 'enable_daily'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_enable_daily'] = int(
curNzbProvider.enable_daily)
if hasattr(curNzbProvider, 'enable_backlog'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_enable_backlog'] = int(
curNzbProvider.enable_backlog)
new_config['NZBs'] = {}
new_config['NZBs']['nzbs'] = int(NZBS)
new_config['NZBs']['nzbs_uid'] = NZBS_UID
new_config['NZBs']['nzbs_hash'] = NZBS_HASH
new_config['Newzbin'] = {}
new_config['Newzbin']['newzbin'] = int(NEWZBIN)
new_config['Newzbin']['newzbin_username'] = NEWZBIN_USERNAME
new_config['Newzbin']['newzbin_password'] = helpers.encrypt(NEWZBIN_PASSWORD, ENCRYPTION_VERSION)
new_config['SABnzbd'] = {}
new_config['SABnzbd']['sab_username'] = SAB_USERNAME
new_config['SABnzbd']['sab_password'] = helpers.encrypt(SAB_PASSWORD, ENCRYPTION_VERSION)
new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY
new_config['SABnzbd']['sab_category'] = SAB_CATEGORY
new_config['SABnzbd']['sab_category_backlog'] = SAB_CATEGORY_BACKLOG
new_config['SABnzbd']['sab_category_anime'] = SAB_CATEGORY_ANIME
new_config['SABnzbd']['sab_category_anime_backlog'] = SAB_CATEGORY_ANIME_BACKLOG
new_config['SABnzbd']['sab_host'] = SAB_HOST
new_config['SABnzbd']['sab_forced'] = int(SAB_FORCED)
new_config['NZBget'] = {}
new_config['NZBget']['nzbget_username'] = NZBGET_USERNAME
new_config['NZBget']['nzbget_password'] = helpers.encrypt(NZBGET_PASSWORD, ENCRYPTION_VERSION)
new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY
new_config['NZBget']['nzbget_category_backlog'] = NZBGET_CATEGORY_BACKLOG
new_config['NZBget']['nzbget_category_anime'] = NZBGET_CATEGORY_ANIME
new_config['NZBget']['nzbget_category_anime_backlog'] = NZBGET_CATEGORY_ANIME_BACKLOG
new_config['NZBget']['nzbget_host'] = NZBGET_HOST
new_config['NZBget']['nzbget_use_https'] = int(NZBGET_USE_HTTPS)
new_config['NZBget']['nzbget_priority'] = NZBGET_PRIORITY
new_config['TORRENT'] = {}
new_config['TORRENT']['torrent_username'] = TORRENT_USERNAME
new_config['TORRENT']['torrent_password'] = helpers.encrypt(TORRENT_PASSWORD, ENCRYPTION_VERSION)
new_config['TORRENT']['torrent_host'] = TORRENT_HOST
new_config['TORRENT']['torrent_path'] = TORRENT_PATH
new_config['TORRENT']['torrent_seed_time'] = int(TORRENT_SEED_TIME)
new_config['TORRENT']['torrent_paused'] = int(TORRENT_PAUSED)
new_config['TORRENT']['torrent_high_bandwidth'] = int(TORRENT_HIGH_BANDWIDTH)
new_config['TORRENT']['torrent_label'] = TORRENT_LABEL
new_config['TORRENT']['torrent_label_anime'] = TORRENT_LABEL_ANIME
new_config['TORRENT']['torrent_verify_cert'] = int(TORRENT_VERIFY_CERT)
new_config['TORRENT']['torrent_rpcurl'] = TORRENT_RPCURL
new_config['TORRENT']['torrent_auth_type'] = TORRENT_AUTH_TYPE
new_config['KODI'] = {}
new_config['KODI']['use_kodi'] = int(USE_KODI)
new_config['KODI']['kodi_always_on'] = int(KODI_ALWAYS_ON)
new_config['KODI']['kodi_notify_onsnatch'] = int(KODI_NOTIFY_ONSNATCH)
new_config['KODI']['kodi_notify_ondownload'] = int(KODI_NOTIFY_ONDOWNLOAD)
new_config['KODI']['kodi_notify_onsubtitledownload'] = int(KODI_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['KODI']['kodi_update_library'] = int(KODI_UPDATE_LIBRARY)
new_config['KODI']['kodi_update_full'] = int(KODI_UPDATE_FULL)
new_config['KODI']['kodi_update_onlyfirst'] = int(KODI_UPDATE_ONLYFIRST)
new_config['KODI']['kodi_host'] = KODI_HOST
new_config['KODI']['kodi_username'] = KODI_USERNAME
new_config['KODI']['kodi_password'] = helpers.encrypt(KODI_PASSWORD, ENCRYPTION_VERSION)
new_config['Plex'] = {}
new_config['Plex']['use_plex'] = int(USE_PLEX)
new_config['Plex']['plex_notify_onsnatch'] = int(PLEX_NOTIFY_ONSNATCH)
new_config['Plex']['plex_notify_ondownload'] = int(PLEX_NOTIFY_ONDOWNLOAD)
new_config['Plex']['plex_notify_onsubtitledownload'] = int(PLEX_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Plex']['plex_update_library'] = int(PLEX_UPDATE_LIBRARY)
new_config['Plex']['plex_server_host'] = PLEX_SERVER_HOST
new_config['Plex']['plex_server_token'] = PLEX_SERVER_TOKEN
new_config['Plex']['plex_host'] = PLEX_HOST
new_config['Plex']['plex_username'] = PLEX_USERNAME
new_config['Plex']['plex_password'] = helpers.encrypt(PLEX_PASSWORD, ENCRYPTION_VERSION)
new_config['Emby'] = {}
new_config['Emby']['use_emby'] = int(USE_EMBY)
new_config['Emby']['emby_host'] = EMBY_HOST
new_config['Emby']['emby_apikey'] = EMBY_APIKEY
new_config['Growl'] = {}
new_config['Growl']['use_growl'] = int(USE_GROWL)
new_config['Growl']['growl_notify_onsnatch'] = int(GROWL_NOTIFY_ONSNATCH)
new_config['Growl']['growl_notify_ondownload'] = int(GROWL_NOTIFY_ONDOWNLOAD)
new_config['Growl']['growl_notify_onsubtitledownload'] = int(GROWL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Growl']['growl_host'] = GROWL_HOST
new_config['Growl']['growl_password'] = helpers.encrypt(GROWL_PASSWORD, ENCRYPTION_VERSION)
new_config['FreeMobile'] = {}
new_config['FreeMobile']['use_freemobile'] = int(USE_FREEMOBILE)
new_config['FreeMobile']['freemobile_notify_onsnatch'] = int(FREEMOBILE_NOTIFY_ONSNATCH)
new_config['FreeMobile']['freemobile_notify_ondownload'] = int(FREEMOBILE_NOTIFY_ONDOWNLOAD)
new_config['FreeMobile']['freemobile_notify_onsubtitledownload'] = int(FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['FreeMobile']['freemobile_id'] = FREEMOBILE_ID
new_config['FreeMobile']['freemobile_apikey'] = FREEMOBILE_APIKEY
new_config['Prowl'] = {}
new_config['Prowl']['use_prowl'] = int(USE_PROWL)
new_config['Prowl']['prowl_notify_onsnatch'] = int(PROWL_NOTIFY_ONSNATCH)
new_config['Prowl']['prowl_notify_ondownload'] = int(PROWL_NOTIFY_ONDOWNLOAD)
new_config['Prowl']['prowl_notify_onsubtitledownload'] = int(PROWL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Prowl']['prowl_api'] = PROWL_API
new_config['Prowl']['prowl_priority'] = PROWL_PRIORITY
new_config['Twitter'] = {}
new_config['Twitter']['use_twitter'] = int(USE_TWITTER)
new_config['Twitter']['twitter_notify_onsnatch'] = int(TWITTER_NOTIFY_ONSNATCH)
new_config['Twitter']['twitter_notify_ondownload'] = int(TWITTER_NOTIFY_ONDOWNLOAD)
new_config['Twitter']['twitter_notify_onsubtitledownload'] = int(TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Twitter']['twitter_username'] = TWITTER_USERNAME
new_config['Twitter']['twitter_password'] = helpers.encrypt(TWITTER_PASSWORD, ENCRYPTION_VERSION)
new_config['Twitter']['twitter_prefix'] = TWITTER_PREFIX
new_config['Twitter']['twitter_dmto'] = TWITTER_DMTO
new_config['Twitter']['twitter_usedm'] = int(TWITTER_USEDM)
new_config['Boxcar'] = {}
new_config['Boxcar']['use_boxcar'] = int(USE_BOXCAR)
new_config['Boxcar']['boxcar_notify_onsnatch'] = int(BOXCAR_NOTIFY_ONSNATCH)
new_config['Boxcar']['boxcar_notify_ondownload'] = int(BOXCAR_NOTIFY_ONDOWNLOAD)
new_config['Boxcar']['boxcar_notify_onsubtitledownload'] = int(BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Boxcar']['boxcar_username'] = BOXCAR_USERNAME
new_config['Boxcar2'] = {}
new_config['Boxcar2']['use_boxcar2'] = int(USE_BOXCAR2)
new_config['Boxcar2']['boxcar2_notify_onsnatch'] = int(BOXCAR2_NOTIFY_ONSNATCH)
new_config['Boxcar2']['boxcar2_notify_ondownload'] = int(BOXCAR2_NOTIFY_ONDOWNLOAD)
new_config['Boxcar2']['boxcar2_notify_onsubtitledownload'] = int(BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Boxcar2']['boxcar2_accesstoken'] = BOXCAR2_ACCESSTOKEN
new_config['Pushover'] = {}
new_config['Pushover']['use_pushover'] = int(USE_PUSHOVER)
new_config['Pushover']['pushover_notify_onsnatch'] = int(PUSHOVER_NOTIFY_ONSNATCH)
new_config['Pushover']['pushover_notify_ondownload'] = int(PUSHOVER_NOTIFY_ONDOWNLOAD)
new_config['Pushover']['pushover_notify_onsubtitledownload'] = int(PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushover']['pushover_userkey'] = PUSHOVER_USERKEY
new_config['Pushover']['pushover_apikey'] = PUSHOVER_APIKEY
new_config['Pushover']['pushover_device'] = PUSHOVER_DEVICE
new_config['Pushover']['pushover_sound'] = PUSHOVER_SOUND
new_config['Libnotify'] = {}
new_config['Libnotify']['use_libnotify'] = int(USE_LIBNOTIFY)
new_config['Libnotify']['libnotify_notify_onsnatch'] = int(LIBNOTIFY_NOTIFY_ONSNATCH)
new_config['Libnotify']['libnotify_notify_ondownload'] = int(LIBNOTIFY_NOTIFY_ONDOWNLOAD)
new_config['Libnotify']['libnotify_notify_onsubtitledownload'] = int(LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['NMJ'] = {}
new_config['NMJ']['use_nmj'] = int(USE_NMJ)
new_config['NMJ']['nmj_host'] = NMJ_HOST
new_config['NMJ']['nmj_database'] = NMJ_DATABASE
new_config['NMJ']['nmj_mount'] = NMJ_MOUNT
new_config['NMJv2'] = {}
new_config['NMJv2']['use_nmjv2'] = int(USE_NMJv2)
new_config['NMJv2']['nmjv2_host'] = NMJv2_HOST
new_config['NMJv2']['nmjv2_database'] = NMJv2_DATABASE
new_config['NMJv2']['nmjv2_dbloc'] = NMJv2_DBLOC
new_config['Synology'] = {}
new_config['Synology']['use_synoindex'] = int(USE_SYNOINDEX)
new_config['SynologyNotifier'] = {}
new_config['SynologyNotifier']['use_synologynotifier'] = int(USE_SYNOLOGYNOTIFIER)
new_config['SynologyNotifier']['synologynotifier_notify_onsnatch'] = int(SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH)
new_config['SynologyNotifier']['synologynotifier_notify_ondownload'] = int(SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD)
new_config['SynologyNotifier']['synologynotifier_notify_onsubtitledownload'] = int(
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Trakt'] = {}
new_config['Trakt']['use_trakt'] = int(USE_TRAKT)
new_config['Trakt']['trakt_username'] = TRAKT_USERNAME
new_config['Trakt']['trakt_access_token'] = TRAKT_ACCESS_TOKEN
new_config['Trakt']['trakt_refresh_token'] = TRAKT_REFRESH_TOKEN
new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
new_config['Trakt']['trakt_remove_serieslist'] = int(TRAKT_REMOVE_SERIESLIST)
new_config['Trakt']['trakt_remove_show_from_sickrage'] = int(TRAKT_REMOVE_SHOW_FROM_SICKRAGE)
new_config['Trakt']['trakt_sync_watchlist'] = int(TRAKT_SYNC_WATCHLIST)
new_config['Trakt']['trakt_method_add'] = int(TRAKT_METHOD_ADD)
new_config['Trakt']['trakt_start_paused'] = int(TRAKT_START_PAUSED)
new_config['Trakt']['trakt_use_recommended'] = int(TRAKT_USE_RECOMMENDED)
new_config['Trakt']['trakt_sync'] = int(TRAKT_SYNC)
new_config['Trakt']['trakt_sync_remove'] = int(TRAKT_SYNC_REMOVE)
new_config['Trakt']['trakt_default_indexer'] = int(TRAKT_DEFAULT_INDEXER)
new_config['Trakt']['trakt_timeout'] = int(TRAKT_TIMEOUT)
new_config['Trakt']['trakt_blacklist_name'] = TRAKT_BLACKLIST_NAME
new_config['pyTivo'] = {}
new_config['pyTivo']['use_pytivo'] = int(USE_PYTIVO)
new_config['pyTivo']['pytivo_notify_onsnatch'] = int(PYTIVO_NOTIFY_ONSNATCH)
new_config['pyTivo']['pytivo_notify_ondownload'] = int(PYTIVO_NOTIFY_ONDOWNLOAD)
new_config['pyTivo']['pytivo_notify_onsubtitledownload'] = int(PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD)
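    # note: the mixed-case 'pyTivo_update_library' key matches the one used at load time, kept for config compatibility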
new_config['pyTivo']['pyTivo_update_library'] = int(PYTIVO_UPDATE_LIBRARY)
new_config['pyTivo']['pytivo_host'] = PYTIVO_HOST
new_config['pyTivo']['pytivo_share_name'] = PYTIVO_SHARE_NAME
new_config['pyTivo']['pytivo_tivo_name'] = PYTIVO_TIVO_NAME
new_config['NMA'] = {}
new_config['NMA']['use_nma'] = int(USE_NMA)
new_config['NMA']['nma_notify_onsnatch'] = int(NMA_NOTIFY_ONSNATCH)
new_config['NMA']['nma_notify_ondownload'] = int(NMA_NOTIFY_ONDOWNLOAD)
new_config['NMA']['nma_notify_onsubtitledownload'] = int(NMA_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['NMA']['nma_api'] = NMA_API
new_config['NMA']['nma_priority'] = NMA_PRIORITY
new_config['Pushalot'] = {}
new_config['Pushalot']['use_pushalot'] = int(USE_PUSHALOT)
new_config['Pushalot']['pushalot_notify_onsnatch'] = int(PUSHALOT_NOTIFY_ONSNATCH)
new_config['Pushalot']['pushalot_notify_ondownload'] = int(PUSHALOT_NOTIFY_ONDOWNLOAD)
new_config['Pushalot']['pushalot_notify_onsubtitledownload'] = int(PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushalot']['pushalot_authorizationtoken'] = PUSHALOT_AUTHORIZATIONTOKEN
new_config['Pushbullet'] = {}
new_config['Pushbullet']['use_pushbullet'] = int(USE_PUSHBULLET)
new_config['Pushbullet']['pushbullet_notify_onsnatch'] = int(PUSHBULLET_NOTIFY_ONSNATCH)
new_config['Pushbullet']['pushbullet_notify_ondownload'] = int(PUSHBULLET_NOTIFY_ONDOWNLOAD)
new_config['Pushbullet']['pushbullet_notify_onsubtitledownload'] = int(PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushbullet']['pushbullet_api'] = PUSHBULLET_API
new_config['Pushbullet']['pushbullet_device'] = PUSHBULLET_DEVICE
new_config['Email'] = {}
new_config['Email']['use_email'] = int(USE_EMAIL)
new_config['Email']['email_notify_onsnatch'] = int(EMAIL_NOTIFY_ONSNATCH)
new_config['Email']['email_notify_ondownload'] = int(EMAIL_NOTIFY_ONDOWNLOAD)
new_config['Email']['email_notify_onsubtitledownload'] = int(EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Email']['email_host'] = EMAIL_HOST
new_config['Email']['email_port'] = int(EMAIL_PORT)
new_config['Email']['email_tls'] = int(EMAIL_TLS)
new_config['Email']['email_user'] = EMAIL_USER
new_config['Email']['email_password'] = helpers.encrypt(EMAIL_PASSWORD, ENCRYPTION_VERSION)
new_config['Email']['email_from'] = EMAIL_FROM
new_config['Email']['email_list'] = EMAIL_LIST
new_config['Newznab'] = {}
new_config['Newznab']['newznab_data'] = NEWZNAB_DATA
new_config['TorrentRss'] = {}
new_config['TorrentRss']['torrentrss_data'] = '!!!'.join([x.configStr() for x in torrentRssProviderList])
new_config['GUI'] = {}
new_config['GUI']['gui_name'] = GUI_NAME
new_config['GUI']['theme_name'] = THEME_NAME
new_config['GUI']['home_layout'] = HOME_LAYOUT
new_config['GUI']['history_layout'] = HISTORY_LAYOUT
new_config['GUI']['history_limit'] = HISTORY_LIMIT
new_config['GUI']['display_show_specials'] = int(DISPLAY_SHOW_SPECIALS)
new_config['GUI']['coming_eps_layout'] = COMING_EPS_LAYOUT
new_config['GUI']['coming_eps_display_paused'] = int(COMING_EPS_DISPLAY_PAUSED)
new_config['GUI']['coming_eps_sort'] = COMING_EPS_SORT
new_config['GUI']['coming_eps_missed_range'] = int(COMING_EPS_MISSED_RANGE)
new_config['GUI']['fuzzy_dating'] = int(FUZZY_DATING)
new_config['GUI']['trim_zero'] = int(TRIM_ZERO)
new_config['GUI']['date_preset'] = DATE_PRESET
new_config['GUI']['time_preset'] = TIME_PRESET_W_SECONDS
new_config['GUI']['timezone_display'] = TIMEZONE_DISPLAY
new_config['GUI']['poster_sortby'] = POSTER_SORTBY
new_config['GUI']['poster_sortdir'] = POSTER_SORTDIR
new_config['GUI']['filter_row'] = int(FILTER_ROW)
new_config['Subtitles'] = {}
new_config['Subtitles']['use_subtitles'] = int(USE_SUBTITLES)
new_config['Subtitles']['subtitles_languages'] = ','.join(SUBTITLES_LANGUAGES)
new_config['Subtitles']['SUBTITLES_SERVICES_LIST'] = ','.join(SUBTITLES_SERVICES_LIST)
new_config['Subtitles']['SUBTITLES_SERVICES_ENABLED'] = '|'.join([str(x) for x in SUBTITLES_SERVICES_ENABLED])
new_config['Subtitles']['subtitles_dir'] = SUBTITLES_DIR
new_config['Subtitles']['subtitles_default'] = int(SUBTITLES_DEFAULT)
new_config['Subtitles']['subtitles_history'] = int(SUBTITLES_HISTORY)
new_config['Subtitles']['embedded_subtitles_all'] = int(EMBEDDED_SUBTITLES_ALL)
new_config['Subtitles']['subtitles_hearing_impaired'] = int(SUBTITLES_HEARING_IMPAIRED)
new_config['Subtitles']['subtitles_finder_frequency'] = int(SUBTITLES_FINDER_FREQUENCY)
new_config['Subtitles']['subtitles_multi'] = int(SUBTITLES_MULTI)
new_config['Subtitles']['subtitles_extra_scripts'] = '|'.join(SUBTITLES_EXTRA_SCRIPTS)
new_config['Subtitles']['addic7ed_username'] = ADDIC7ED_USER
new_config['Subtitles']['addic7ed_password'] = helpers.encrypt(ADDIC7ED_PASS, ENCRYPTION_VERSION)
new_config['Subtitles']['legendastv_username'] = LEGENDASTV_USER
new_config['Subtitles']['legendastv_password'] = helpers.encrypt(LEGENDASTV_PASS, ENCRYPTION_VERSION)
new_config['Subtitles']['opensubtitles_username'] = OPENSUBTITLES_USER
new_config['Subtitles']['opensubtitles_password'] = helpers.encrypt(OPENSUBTITLES_PASS, ENCRYPTION_VERSION)
new_config['FailedDownloads'] = {}
new_config['FailedDownloads']['use_failed_downloads'] = int(USE_FAILED_DOWNLOADS)
new_config['FailedDownloads']['delete_failed'] = int(DELETE_FAILED)
new_config['ANIDB'] = {}
new_config['ANIDB']['use_anidb'] = int(USE_ANIDB)
new_config['ANIDB']['anidb_username'] = ANIDB_USERNAME
new_config['ANIDB']['anidb_password'] = helpers.encrypt(ANIDB_PASSWORD, ENCRYPTION_VERSION)
new_config['ANIDB']['anidb_use_mylist'] = int(ANIDB_USE_MYLIST)
new_config['ANIME'] = {}
new_config['ANIME']['anime_split_home'] = int(ANIME_SPLIT_HOME)
new_config.write()
def launchBrowser(protocol='http', startPort=None, web_root='/'):
if not startPort:
startPort = WEB_PORT
browserURL = '%s://localhost:%d%s/home/' % (protocol, startPort, web_root)
try:
webbrowser.open(browserURL, 2, 1)
except Exception:
try:
webbrowser.open(browserURL, 1, 1)
except Exception:
logger.log(u"Unable to launch a browser", logger.ERROR)
def getEpList(epIDs, showid=None):
if not epIDs:
return []
query = "SELECT * FROM tv_episodes WHERE indexerid in (%s)" % (",".join(['?'] * len(epIDs)),)
params = epIDs
if showid is not None:
query += " AND showid = ?"
params.append(showid)
myDB = db.DBConnection()
sqlResults = myDB.select(query, params)
epList = []
for curEp in sqlResults:
curShowObj = helpers.findCertainShow(showList, int(curEp["showid"]))
curEpObj = curShowObj.getEpisode(int(curEp["season"]), int(curEp["episode"]))
epList.append(curEpObj)
return epList
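# Hedged usage sketch (not part of the original module): look up episode
# objects for a set of indexer ids, optionally scoped to a single show.
# Assumes the surrounding module globals (db, helpers, showList) are initialized.
#
# eps = getEpList([301824, 301825], showid=75760)  # ids are hypothetical
# for ep in eps:
#     print(ep.season, ep.episode)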
|
gpl-3.0
|
aam-at/tensorflow
|
tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
|
14
|
4592
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor.from_sparse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
def testDocStringExample(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
values=[1, 2, 3, 4, 5],
dense_shape=[4, 3])
rt = RaggedTensor.from_sparse(st)
self.assertAllEqual(rt, [[1, 2, 3], [4], [], [5]])
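# For reference, the SparseTensor above corresponds to this 4x3 dense view
# (blanks are missing entries); from_sparse packs each row's leading values
# into one ragged row:
#   [[1, 2, 3],
#    [4,  ,  ],
#    [ ,  ,  ],
#    [5,  ,  ]]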
def testEmpty(self):
st = sparse_tensor.SparseTensor(
indices=array_ops.zeros([0, 2], dtype=dtypes.int64),
values=[],
dense_shape=[4, 3])
rt = RaggedTensor.from_sparse(st)
self.assertAllEqual(rt, [[], [], [], []])
def testBadSparseTensorRank(self):
st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3])
self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st1)
st2 = sparse_tensor.SparseTensor(
indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3])
self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st2)
if not context.executing_eagerly():
st3 = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=[0],
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st3)
def testGoodPartialSparseTensorRank(self):
if not context.executing_eagerly():
st1 = sparse_tensor.SparseTensor(
indices=[[0, 0]],
values=[0],
dense_shape=array_ops.placeholder(dtypes.int64))
st2 = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=[0],
dense_shape=[4, 3])
# Shouldn't throw ValueError
RaggedTensor.from_sparse(st1)
RaggedTensor.from_sparse(st2)
def testNonRaggedSparseTensor(self):
# "index_suffix" means the value of the innermost dimension of the index
# (i.e., indices[i][-1]).
# See comments in _assert_sparse_indices_are_ragged_right() for more
# details/background.
# index_suffix of first index is not zero.
st1 = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st1))
# index_suffix of an index that starts a new row is not zero.
st2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st2))
# index_suffix of an index that continues a row skips a cell.
st3 = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st3))
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
sliceofcode/dogbot
|
dog/ext/gatekeeper/core.py
|
2
|
1390
|
__all__ = ["GatekeeperException", "Report", "Bounce", "Ban", "create_embed"]
import datetime
import discord
from lifesaver.utils import human_delta
class GatekeeperException(RuntimeError):
"""An exception thrown during Gatekeeper processes."""
class CheckFailure(GatekeeperException):
"""An exception thrown due to a check failing to pass."""
#: The name of the check that failed to pass.
check_name = None
#: The check function that failed to pass.
check = None
class Report(GatekeeperException):
"""A Gatekeeper exception that immediately halts all processing and sends
the specified text to the broadcasting channel.
"""
class Bounce(CheckFailure):
"""A Gatekeeper exception that will prevent a user from joining a guild when raised."""
class Ban(CheckFailure):
"""A Gatekeeper exception that will ban a user from the guild when raised."""
def create_embed(
member: discord.Member, *, color: discord.Color, title: str, reason: str
) -> discord.Embed:
"""Create a Gatekeeper bounce or ban embed."""
embed = discord.Embed(color=color, title=title, description=reason)
embed.timestamp = datetime.datetime.utcnow()
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(
name="Account Creation",
value=f"{human_delta(member.created_at)} ago\n{member.created_at}",
)
return embed
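# Hedged usage sketch (not part of the original module): building a bounce
# embed inside a Gatekeeper check. `member` is assumed to come from an
# on_member_join event and `broadcast_channel` is a hypothetical TextChannel.
#
# embed = create_embed(
#     member,
#     color=discord.Color.red(),
#     title="Member bounced",
#     reason="Account is too new.",
# )
# await broadcast_channel.send(embed=embed)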
|
mit
|
dontnod/weblate
|
weblate/accounts/tests/test_avatars.py
|
1
|
3438
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Tests for user handling.
"""
from io import BytesIO
import httpretty
from django.urls import reverse
from PIL import Image
from weblate.accounts import avatar
from weblate.auth.models import User
from weblate.trans.tests.test_views import FixtureTestCase
TEST_URL = (
'https://www.gravatar.com/avatar/'
'55502f40dc8b7c769880b10874abc9d0?d=identicon&s=32'
)
class AvatarTest(FixtureTestCase):
def setUp(self):
super(AvatarTest, self).setUp()
self.user.email = '[email protected]'
self.user.save()
def test_avatar_for_email(self):
url = avatar.avatar_for_email(
self.user.email,
size=32,
)
self.assertEqual(TEST_URL, url)
@httpretty.activate
def test_avatar(self):
image = Image.new('RGB', (32, 32))
storage = BytesIO()
image.save(storage, 'PNG')
imagedata = storage.getvalue()
httpretty.register_uri(
httpretty.GET,
TEST_URL,
body=imagedata,
)
# Real user
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': self.user.username, 'size': 32}
)
)
self.assert_png(response)
self.assertEqual(response.content, imagedata)
# Test caching
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': self.user.username, 'size': 32}
)
)
self.assert_png(response)
self.assertEqual(response.content, imagedata)
@httpretty.activate
def test_avatar_error(self):
httpretty.register_uri(
httpretty.GET,
TEST_URL,
status=503,
)
# Choose different username to avoid using cache
self.user.username = 'test2'
self.user.save()
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': self.user.username, 'size': 32}
)
)
self.assert_png(response)
def test_anonymous_avatar(self):
anonymous = User.objects.get(username='anonymous')
# Anonymous user
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': anonymous.username, 'size': 32}
)
)
self.assertRedirects(
response, '/static/weblate-32.png',
fetch_redirect_response=False
)
def test_fallback_avatar(self):
self.assert_png_data(
avatar.get_fallback_avatar(32)
)
|
gpl-3.0
|
wilebeast/FireFox-OS
|
B2G/gecko/media/webrtc/trunk/tools/gyp/test/mac/gyptest-archs.py
|
96
|
1171
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to ARCHS.
"""
import TestGyp
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
def CheckFileType(file, expected):
proc = subprocess.Popen(['file', '-b', file], stdout=subprocess.PIPE)
o = proc.communicate()[0].strip()
assert not proc.returncode
if o != expected:
print 'File: Expected %s, got %s' % (expected, o)
test.fail_test()
test.run_gyp('test-no-archs.gyp', chdir='archs')
test.build('test-no-archs.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test', chdir='archs')
test.must_exist(result_file)
CheckFileType(result_file, 'Mach-O executable i386')
test.run_gyp('test-archs-x86_64.gyp', chdir='archs')
test.build('test-archs-x86_64.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test64', chdir='archs')
test.must_exist(result_file)
CheckFileType(result_file, 'Mach-O 64-bit executable x86_64')
|
apache-2.0
|
mcus/SickRage
|
lib/requests/packages/urllib3/fields.py
|
1007
|
5833
|
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
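# Illustrative behavior (doctest-style sketch, not part of upstream urllib3):
# >>> guess_content_type('photo.jpg')
# 'image/jpeg'
# >>> guess_content_type('archive.unknown-ext')
# 'application/octet-stream'
# >>> guess_content_type(None)
# 'application/octet-stream'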
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
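# Illustrative behavior (doctest-style sketch, not part of upstream urllib3):
# >>> format_header_param('filename', u'report.txt')
# 'filename="report.txt"'
# >>> format_header_param('filename', u'caf\xe9.txt')  # non-ASCII -> RFC 2231
# "filename*=utf-8''caf%C3%A9.txt"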
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
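# Illustrative behavior (sketch, not part of upstream urllib3): a two-tuple
# guesses the MIME type from the filename, a three-tuple states it explicitly.
#
# rf = RequestField.from_tuples('upload', ('report.txt', 'file contents'))
# rf.headers['Content-Type']  # -> 'text/plain'
# rf = RequestField.from_tuples('upload', ('blob.bin', b'\x00', 'image/jpeg'))
# rf.headers['Content-Type']  # -> 'image/jpeg'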
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
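# Illustrative behavior (sketch, not part of upstream urllib3):
# >>> RequestField('f', 'data')._render_parts(
# ...     (('name', 'f'), ('filename', 'x.txt')))
# 'name="f"; filename="x.txt"'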
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request field.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
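# Hedged end-to-end sketch (not part of upstream urllib3): the rendered
# headers for a simple multipart field. render_headers() returns one string;
# it is shown split on CRLF here for readability.
#
# field = RequestField.from_tuples('upload', ('report.txt', 'hello'))
# field.render_headers()
# # 'Content-Disposition: form-data; name="upload"; filename="report.txt"\r\n'
# # 'Content-Type: text/plain\r\n'
# # '\r\n'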
|
gpl-3.0
|
wangxiangyu/horizon
|
openstack_dashboard/test/api_tests/swift_tests.py
|
18
|
8954
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class SwiftApiTests(test.APITestCase):
def test_swift_get_containers(self):
containers = self.containers.list()
cont_data = [c._apidict for c in containers]
swift_api = self.stub_swiftclient()
swift_api.get_account(limit=1001,
marker=None,
full_listing=True).AndReturn([{}, cont_data])
self.mox.ReplayAll()
(conts, more) = api.swift.swift_get_containers(self.request)
self.assertEqual(len(containers), len(conts))
self.assertFalse(more)
def test_swift_get_container_with_data(self):
container = self.containers.first()
objects = self.objects.list()
swift_api = self.stub_swiftclient()
swift_api.get_object(container.name, "") \
.AndReturn((container, objects))
self.mox.ReplayAll()
cont = api.swift.swift_get_container(self.request, container.name)
self.assertEqual(container.name, cont.name)
self.assertEqual(len(objects), len(cont.data))
def test_swift_get_container_without_data(self):
container = self.containers.first()
swift_api = self.stub_swiftclient()
swift_api.head_container(container.name).AndReturn(container)
self.mox.ReplayAll()
cont = api.swift.swift_get_container(self.request,
container.name,
with_data=False)
self.assertEqual(cont.name, container.name)
self.assertIsNone(cont.data)
def test_swift_create_duplicate_container(self):
metadata = {'is_public': False}
container = self.containers.first()
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api = self.stub_swiftclient()
# Check for existence, then create
exc = self.exceptions.swift
swift_api.head_container(container.name).AndRaise(exc)
swift_api.put_container(container.name, headers=headers) \
.AndReturn(container)
self.mox.ReplayAll()
# Verification handled by mox, no assertions needed.
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
def test_swift_create_container(self):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = self.stub_swiftclient()
swift_api.head_container(container.name).AndReturn(container)
self.mox.ReplayAll()
# Creating a container that already exists should raise AlreadyExists.
with self.assertRaises(exceptions.AlreadyExists):
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
def test_swift_update_container(self):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = self.stub_swiftclient()
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api.post_container(container.name, headers=headers)\
.AndReturn(container)
self.mox.ReplayAll()
# Verification handled by mox, no assertions needed.
api.swift.swift_update_container(self.request,
container.name,
metadata=(metadata))
def test_swift_get_objects(self):
container = self.containers.first()
objects = self.objects.list()
swift_api = self.stub_swiftclient()
swift_api.get_container(container.name,
limit=1001,
marker=None,
prefix=None,
delimiter='/',
full_listing=True).AndReturn([{}, objects])
self.mox.ReplayAll()
(objs, more) = api.swift.swift_get_objects(self.request,
container.name)
self.assertEqual(len(objects), len(objs))
self.assertFalse(more)
def test_swift_get_object_with_data_non_chunked(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.get_object(
container.name, object.name, resp_chunk_size=None
).AndReturn([object, object.data])
self.mox.ReplayAll()
obj = api.swift.swift_get_object(self.request, container.name,
object.name, resp_chunk_size=None)
self.assertEqual(object.name, obj.name)
def test_swift_get_object_with_data_chunked(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.get_object(
container.name, object.name, resp_chunk_size=api.swift.CHUNK_SIZE
).AndReturn([object, object.data])
self.mox.ReplayAll()
obj = api.swift.swift_get_object(
self.request, container.name, object.name)
self.assertEqual(object.name, obj.name)
def test_swift_get_object_without_data(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.head_object(container.name, object.name) \
.AndReturn(object)
self.mox.ReplayAll()
obj = api.swift.swift_get_object(self.request,
container.name,
object.name,
with_data=False)
self.assertEqual(object.name, obj.name)
self.assertIsNone(obj.data)
def test_swift_upload_object(self):
container = self.containers.first()
obj = self.objects.first()
fake_name = 'fake_object.jpg'
class FakeFile(object):
def __init__(self):
self.name = fake_name
self.data = obj.data
self.size = len(obj.data)
headers = {'X-Object-Meta-Orig-Filename': fake_name}
swift_api = self.stub_swiftclient()
swift_api.put_object(container.name,
obj.name,
IsA(FakeFile),
headers=headers)
self.mox.ReplayAll()
api.swift.swift_upload_object(self.request,
container.name,
obj.name,
FakeFile())
def test_swift_upload_object_without_file(self):
container = self.containers.first()
obj = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.put_object(container.name,
obj.name,
None,
headers={})
self.mox.ReplayAll()
response = api.swift.swift_upload_object(self.request,
container.name,
obj.name,
None)
self.assertEqual(0, response['bytes'])
def test_swift_object_exists(self):
container = self.containers.first()
obj = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.head_object(container.name, obj.name).AndReturn(container)
exc = self.exceptions.swift
swift_api.head_object(container.name, obj.name).AndRaise(exc)
self.mox.ReplayAll()
args = self.request, container.name, obj.name
self.assertTrue(api.swift.swift_object_exists(*args))
# Again, for a "non-existent" object
self.assertFalse(api.swift.swift_object_exists(*args))
|
apache-2.0
|
alexlo03/ansible
|
lib/ansible/modules/network/aci/aci_switch_leaf_selector.py
|
7
|
10169
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_switch_leaf_selector
short_description: Bind leaf selectors to switch policy leaf profiles (infra:LeafS, infra:NodeBlk, infra:RsAccNodePGrp)
description:
- Bind leaf selectors (with node block range and policy group) to switch policy leaf profiles on Cisco ACI fabrics.
notes:
- This module is to be used with M(aci_switch_policy_leaf_profile).
One first creates a leaf profile (infra:NodeP) and then creates an associated selector (infra:LeafS).
- More information about the internal APIC classes B(infra:LeafS), B(infra:NodeBlk) and B(infra:RsAccNodePGrp) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
description:
description:
- The description to assign to the C(leaf).
leaf_profile:
description:
- Name of the Leaf Profile to which we add a Selector.
aliases: [ leaf_profile_name ]
leaf:
description:
- Name of Leaf Selector.
aliases: [ name, leaf_name, leaf_profile_leaf_name, leaf_selector_name ]
leaf_node_blk:
description:
- Name of Node Block range to be added to Leaf Selector of given Leaf Profile.
aliases: [ leaf_node_blk_name, node_blk_name ]
leaf_node_blk_description:
description:
- The description to assign to the C(leaf_node_blk)
from:
description:
- Start of Node Block range.
type: int
aliases: [ node_blk_range_from, from_range, range_from ]
to:
description:
- End of Node Block range.
type: int
aliases: [ node_blk_range_to, to_range, range_to ]
policy_group:
description:
- Name of the Policy Group to be added to Leaf Selector of given Leaf Profile.
aliases: [ policy_group_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: adding a switch policy leaf profile selector associated Node Block range (w/ policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
policy_group: somepolicygroupname
state: present
delegate_to: localhost
- name: adding a switch policy leaf profile selector associated Node Block range (w/o policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
state: present
delegate_to: localhost
- name: Removing a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: absent
delegate_to: localhost
- name: Querying a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update({
'description': dict(type='str'),
'leaf_profile': dict(type='str', aliases=['leaf_profile_name']), # Not required for querying all objects
'leaf': dict(type='str', aliases=['name', 'leaf_name', 'leaf_profile_leaf_name', 'leaf_selector_name']), # Not required for querying all objects
'leaf_node_blk': dict(type='str', aliases=['leaf_node_blk_name', 'node_blk_name']),
'leaf_node_blk_description': dict(type='str'),
# NOTE: Keyword 'from' is a reserved word in python, so we need it as a string
'from': dict(type='int', aliases=['node_blk_range_from', 'from_range', 'range_from']),
'to': dict(type='int', aliases=['node_blk_range_to', 'to_range', 'range_to']),
'policy_group': dict(type='str', aliases=['policy_group_name']),
'state': dict(type='str', default='present', choices=['absent', 'present', 'query']),
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_profile', 'leaf']],
['state', 'present', ['leaf_profile', 'leaf', 'leaf_node_blk', 'from', 'to']]
]
)
description = module.params['description']
leaf_profile = module.params['leaf_profile']
leaf = module.params['leaf']
leaf_node_blk = module.params['leaf_node_blk']
leaf_node_blk_description = module.params['leaf_node_blk_description']
from_ = module.params['from']
to_ = module.params['to']
policy_group = module.params['policy_group']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraNodeP',
aci_rn='infra/nprof-{0}'.format(leaf_profile),
module_object=leaf_profile,
target_filter={'name': leaf_profile},
),
subclass_1=dict(
aci_class='infraLeafS',
# NOTE: normal rn: leaves-{name}-typ-{type}, hence here hardcoded to range for purposes of module
aci_rn='leaves-{0}-typ-range'.format(leaf),
module_object=leaf,
target_filter={'name': leaf},
),
# NOTE: infraNodeBlk is not made into a subclass because there is a 1-1 mapping between node block and leaf selector name
child_classes=['infraNodeBlk', 'infraRsAccNodePGrp'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraLeafS',
class_config=dict(
descr=description,
name=leaf,
),
child_configs=[
dict(
infraNodeBlk=dict(
attributes=dict(
descr=leaf_node_blk_description,
name=leaf_node_blk,
from_=from_,
to_=to_,
),
),
),
dict(
infraRsAccNodePGrp=dict(
attributes=dict(
tDn='uni/infra/funcprof/accnodepgrp-{0}'.format(policy_group),
),
),
),
],
)
aci.get_diff(aci_class='infraLeafS')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
|
richard-willowit/odoo
|
odoo/__init__.py
|
2
|
2935
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" OpenERP core library."""
#----------------------------------------------------------
# odoo must be a namespace package for odoo.addons to become one too
# https://packaging.python.org/guides/packaging-namespace-packages/
#----------------------------------------------------------
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if len(sys.argv) > 1 and sys.argv[1] == 'gevent':
sys.argv.remove('gevent')
import gevent.monkey
gevent.monkey.patch_all()
import psycogreen.gevent
psycogreen.gevent.patch_psycopg()
evented = True
# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC.
import os
os.environ['TZ'] = 'UTC' # Set the timezone
import time
if hasattr(time, 'tzset'):
time.tzset()
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.Registry(database_name)
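# Hedged usage sketch (not part of this file): obtaining a registry and a
# cursor explicitly; 'mydb' is a hypothetical database name.
#
# reg = registry('mydb')
# with reg.cursor() as cr:
#     cr.execute("SELECT 1")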
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
from . import addons
from . import conf
from . import loglevels
from . import modules
from . import netsvc
from . import osv
from . import release
from . import service
from . import sql_db
from . import tools
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from odoo.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
from . import cli
from . import http
|
gpl-3.0
|
samyoyo/weevely3
|
testsuite/test_net_proxy.py
|
14
|
3484
|
from testsuite.base_test import BaseTest
from testfixtures import log_capture
from testsuite import config
from core.sessions import SessionURL
from core import modules
from core import messages
import subprocess
import logging
import tempfile
import os
import re
class Proxy(BaseTest):
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
modules.loaded['net_proxy'].run_argv([ ])
fname = 'check1.php'
self.file = os.path.join(config.script_folder, fname)
self.check_call(
config.cmd_env_content_s_to_s % ('<?php print_r(\$_SERVER);print_r(\$_REQUEST); ?>', self.file),
shell=True)
self.url = os.path.sep.join([
config.script_folder_url.rstrip('/'),
fname ]
)
def run_argv(self, arguments):
arguments += [ '--proxy', 'localhost:8080' ]
result = subprocess.check_output(
config.cmd_env_curl_s % ('" "'.join(arguments)),
shell=True).strip()
return result if result != 'None' else None
def tearDown(self):
self.check_call(
config.cmd_env_remove_s % (self.file),
shell=True)
def _clean_result(self, result):
return result if not result else re.sub('[\n]|[ ]{2,}',' ', result)
@log_capture()
def test_all(self, log_captured):
# Simple GET
self.assertIn(
'[REQUEST_METHOD] => GET',
self._clean_result(self.run_argv([ self.url ]))
)
# PUT request
self.assertIn(
'[REQUEST_METHOD] => PUT',
self._clean_result(self.run_argv([ self.url, '-X', 'PUT' ]))
)
# Add header
self.assertIn(
'[HTTP_X_ARBITRARY_HEADER] => bogus',
self._clean_result(self.run_argv([ '-H', 'X-Arbitrary-Header: bogus', self.url ]))
)
# Add cookie
self.assertIn(
'[HTTP_COOKIE] => C1=bogus;C2=bogus2',
self._clean_result(self.run_argv([ self.url, '-b', 'C1=bogus;C2=bogus2']))
)
# POST request with data
result = self._clean_result(self.run_argv([ self.url, '--data', 'f1=data1&f2=data2' ]))
self.assertIn(
'[REQUEST_METHOD] => POST',
result
)
self.assertIn(
'[f1] => data1 [f2] => data2',
result
)
# GET request with URL
result = self._clean_result(self.run_argv([ self.url + '/?f1=data1&f2=data2' ]))
self.assertIn(
'[REQUEST_METHOD] => GET',
result
)
self.assertIn(
'[f1] => data1 [f2] => data2',
result
)
# UNREACHABLE
self.assertIsNone(self.run_argv([ 'http://unreachable-bogus-bogus' ]))
self.assertEqual(messages.module_net_curl.unexpected_response,
log_captured.records[-1].msg)
# FILTERED
self.assertIsNone(self.run_argv([ 'http://www.google.com:9999', '--connect-timeout', '1' ]))
self.assertEqual(messages.module_net_curl.unexpected_response,
log_captured.records[-1].msg)
# CLOSED
self.assertIsNone(self.run_argv([ 'http://localhost:9999', '--connect-timeout', '1' ]))
self.assertEqual(messages.module_net_curl.unexpected_response,
log_captured.records[-1].msg)
|
gpl-3.0
|
catapult-project/catapult-csm
|
third_party/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc4210.py
|
37
|
26208
|
#
# Certificate Management Protocol structures as per RFC4210
#
# Based on Alex Railean's work
#
from pyasn1.type import tag,namedtype,namedval,univ,constraint,char,useful
from pyasn1_modules import rfc2459, rfc2511, rfc2314
MAX = 64
class KeyIdentifier(univ.OctetString): pass
class CMPCertificate(rfc2459.Certificate): pass
class OOBCert(CMPCertificate): pass
class CertAnnContent(CMPCertificate): pass
class PKIFreeText(univ.SequenceOf):
"""
PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
"""
componentType = char.UTF8String()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class PollRepContent(univ.SequenceOf):
"""
PollRepContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER,
checkAfter INTEGER, -- time in seconds
reason PKIFreeText OPTIONAL
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('checkAfter', univ.Integer()),
namedtype.OptionalNamedType('reason', PKIFreeText())
)
componentType = CertReq()
class PollReqContent(univ.SequenceOf):
"""
PollReqContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer())
)
componentType = CertReq()
class InfoTypeAndValue(univ.Sequence):
"""
InfoTypeAndValue ::= SEQUENCE {
infoType OBJECT IDENTIFIER,
infoValue ANY DEFINED BY infoType OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('infoType', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('infoValue', univ.Any())
)
class GenRepContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class GenMsgContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class PKIConfirmContent(univ.Null): pass
class CRLAnnContent(univ.SequenceOf):
componentType = rfc2459.CertificateList()
class CAKeyUpdAnnContent(univ.Sequence):
"""
CAKeyUpdAnnContent ::= SEQUENCE {
oldWithNew CMPCertificate,
newWithOld CMPCertificate,
newWithNew CMPCertificate
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('oldWithNew', CMPCertificate()),
namedtype.NamedType('newWithOld', CMPCertificate()),
namedtype.NamedType('newWithNew', CMPCertificate())
)
class RevDetails(univ.Sequence):
"""
RevDetails ::= SEQUENCE {
certDetails CertTemplate,
crlEntryDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
)
class RevReqContent(univ.SequenceOf):
componentType = RevDetails()
class CertOrEncCert(univ.Choice):
"""
CertOrEncCert ::= CHOICE {
certificate [0] CMPCertificate,
encryptedCert [1] EncryptedValue
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', CMPCertificate().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class CertifiedKeyPair(univ.Sequence):
"""
CertifiedKeyPair ::= SEQUENCE {
certOrEncCert CertOrEncCert,
privateKey [0] EncryptedValue OPTIONAL,
publicationInfo [1] PKIPublicationInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certOrEncCert', CertOrEncCert()),
namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class POPODecKeyRespContent(univ.SequenceOf):
componentType = univ.Integer()
class Challenge(univ.Sequence):
"""
Challenge ::= SEQUENCE {
owf AlgorithmIdentifier OPTIONAL,
witness OCTET STRING,
challenge OCTET STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString()),
namedtype.NamedType('challenge', univ.OctetString())
)
class PKIStatus(univ.Integer):
"""
PKIStatus ::= INTEGER {
accepted (0),
grantedWithMods (1),
rejection (2),
waiting (3),
revocationWarning (4),
revocationNotification (5),
keyUpdateWarning (6)
}
"""
namedValues = namedval.NamedValues(
('accepted', 0),
('grantedWithMods', 1),
('rejection', 2),
('waiting', 3),
('revocationWarning', 4),
('revocationNotification', 5),
('keyUpdateWarning', 6)
)
class PKIFailureInfo(univ.BitString):
"""
PKIFailureInfo ::= BIT STRING {
badAlg (0),
badMessageCheck (1),
badRequest (2),
badTime (3),
badCertId (4),
badDataFormat (5),
wrongAuthority (6),
incorrectData (7),
missingTimeStamp (8),
badPOP (9),
certRevoked (10),
certConfirmed (11),
wrongIntegrity (12),
badRecipientNonce (13),
timeNotAvailable (14),
unacceptedPolicy (15),
unacceptedExtension (16),
addInfoNotAvailable (17),
badSenderNonce (18),
badCertTemplate (19),
signerNotTrusted (20),
transactionIdInUse (21),
unsupportedVersion (22),
notAuthorized (23),
systemUnavail (24),
systemFailure (25),
duplicateCertReq (26)
}
"""
namedValues = namedval.NamedValues(
('badAlg', 0),
('badMessageCheck', 1),
('badRequest', 2),
('badTime', 3),
('badCertId', 4),
('badDataFormat', 5),
('wrongAuthority', 6),
('incorrectData', 7),
('missingTimeStamp', 8),
('badPOP', 9),
('certRevoked', 10),
('certConfirmed', 11),
('wrongIntegrity', 12),
('badRecipientNonce', 13),
('timeNotAvailable', 14),
('unacceptedPolicy', 15),
('unacceptedExtension', 16),
('addInfoNotAvailable', 17),
('badSenderNonce', 18),
('badCertTemplate', 19),
('signerNotTrusted', 20),
('transactionIdInUse', 21),
('unsupportedVersion', 22),
('notAuthorized', 23),
('systemUnavail', 24),
('systemFailure', 25),
('duplicateCertReq', 26)
)
class PKIStatusInfo(univ.Sequence):
"""
PKIStatusInfo ::= SEQUENCE {
status PKIStatus,
statusString PKIFreeText OPTIONAL,
failInfo PKIFailureInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.OptionalNamedType('statusString', PKIFreeText()),
namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
)
class ErrorMsgContent(univ.Sequence):
"""
ErrorMsgContent ::= SEQUENCE {
pKIStatusInfo PKIStatusInfo,
errorCode INTEGER OPTIONAL,
-- implementation-specific error codes
errorDetails PKIFreeText OPTIONAL
-- implementation-specific error details
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
namedtype.OptionalNamedType('errorCode', univ.Integer()),
namedtype.OptionalNamedType('errorDetails', PKIFreeText())
)
class CertStatus(univ.Sequence):
"""
CertStatus ::= SEQUENCE {
certHash OCTET STRING,
certReqId INTEGER,
statusInfo PKIStatusInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certHash', univ.OctetString()),
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
)
class CertConfirmContent(univ.SequenceOf):
componentType = CertStatus()
class RevAnnContent(univ.Sequence):
"""
RevAnnContent ::= SEQUENCE {
status PKIStatus,
certId CertId,
willBeRevokedAt GeneralizedTime,
badSinceDate GeneralizedTime,
crlDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.NamedType('certId', rfc2511.CertId()),
namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
)
class RevRepContent(univ.Sequence):
"""
RevRepContent ::= SEQUENCE {
status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
OPTIONAL,
crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('revCerts', univ.SequenceOf(
componentType=rfc2511.CertId()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('crls', univ.SequenceOf(
componentType=rfc2459.CertificateList()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class KeyRecRepContent(univ.Sequence):
"""
KeyRecRepContent ::= SEQUENCE {
status PKIStatusInfo,
newSigCert [0] CMPCertificate OPTIONAL,
caCerts [1] SEQUENCE SIZE (1..MAX) OF
CMPCertificate OPTIONAL,
keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
CertifiedKeyPair OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('newSigCert', CMPCertificate().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('caCerts', univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
)
),
namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(
componentType=CertifiedKeyPair()
).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
)
)
)
class CertResponse(univ.Sequence):
"""
CertResponse ::= SEQUENCE {
certReqId INTEGER,
status PKIStatusInfo,
certifiedKeyPair CertifiedKeyPair OPTIONAL,
rspInfo OCTET STRING OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
namedtype.OptionalNamedType('rspInfo', univ.OctetString())
)
class CertRepMessage(univ.Sequence):
"""
CertRepMessage ::= SEQUENCE {
caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL,
response SEQUENCE OF CertResponse
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('caPubs', univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('response', univ.SequenceOf(
componentType=CertResponse())
)
)
class POPODecKeyChallContent(univ.SequenceOf):
componentType = Challenge()
class OOBCertHash(univ.Sequence):
"""
OOBCertHash ::= SEQUENCE {
hashAlg [0] AlgorithmIdentifier OPTIONAL,
certId [1] CertId OPTIONAL,
hashVal BIT STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('hashAlg',
rfc2459.AlgorithmIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
),
namedtype.OptionalNamedType('certId', rfc2511.CertId().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('hashVal', univ.BitString())
)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
"""
NestedMessageContent ::= PKIMessages
"""
componentType = univ.Any()
class DHBMParameter(univ.Sequence):
"""
DHBMParameter ::= SEQUENCE {
owf AlgorithmIdentifier,
-- AlgId for a One-Way Function (SHA-1 recommended)
mac AlgorithmIdentifier
-- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
} -- or HMAC [RFC2104, RFC2202])
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
class PBMParameter(univ.Sequence):
"""
PBMParameter ::= SEQUENCE {
salt OCTET STRING,
owf AlgorithmIdentifier,
iterationCount INTEGER,
mac AlgorithmIdentifier
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('salt', univ.OctetString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, 128)
)
),
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('iterationCount', univ.Integer()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
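# Hedged construction sketch (not part of upstream pyasn1-modules): filling a
# PBMParameter with the classic pyasn1 setter API and DER-encoding it. The
# 'owf'/'mac' AlgorithmIdentifier values still need their 'algorithm' OIDs set
# before the encode succeeds.
#
# from pyasn1.codec.der import encoder
# pbm = PBMParameter()
# pbm.setComponentByName('salt', univ.OctetString('abcd'))
# pbm.setComponentByName('owf', sha1_alg_id)    # an rfc2459.AlgorithmIdentifier
# pbm.setComponentByName('iterationCount', univ.Integer(1000))
# pbm.setComponentByName('mac', hmac_alg_id)    # an rfc2459.AlgorithmIdentifier
# der = encoder.encode(pbm)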
class PKIProtection(univ.BitString): pass
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
nestedMessageContent = NestedMessageContent().subtype(explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20))
class PKIBody(univ.Choice):
"""
PKIBody ::= CHOICE { -- message-specific body elements
ir [0] CertReqMessages, --Initialization Request
ip [1] CertRepMessage, --Initialization Response
cr [2] CertReqMessages, --Certification Request
cp [3] CertRepMessage, --Certification Response
p10cr [4] CertificationRequest, --imported from [PKCS10]
popdecc [5] POPODecKeyChallContent, --pop Challenge
popdecr [6] POPODecKeyRespContent, --pop Response
kur [7] CertReqMessages, --Key Update Request
kup [8] CertRepMessage, --Key Update Response
krr [9] CertReqMessages, --Key Recovery Request
krp [10] KeyRecRepContent, --Key Recovery Response
rr [11] RevReqContent, --Revocation Request
rp [12] RevRepContent, --Revocation Response
ccr [13] CertReqMessages, --Cross-Cert. Request
ccp [14] CertRepMessage, --Cross-Cert. Response
ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
cann [16] CertAnnContent, --Certificate Ann.
rann [17] RevAnnContent, --Revocation Ann.
crlann [18] CRLAnnContent, --CRL Announcement
pkiconf [19] PKIConfirmContent, --Confirmation
nested [20] NestedMessageContent, --Nested Message
genm [21] GenMsgContent --General Message
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('ir', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
),
namedtype.NamedType('ip', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('cr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,2)
)
),
namedtype.NamedType('cp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,3)
)
),
namedtype.NamedType('p10cr', rfc2314.CertificationRequest().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,4)
)
),
namedtype.NamedType('popdecc', POPODecKeyChallContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,5)
)
),
namedtype.NamedType('popdecr', POPODecKeyRespContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,6)
)
),
namedtype.NamedType('kur', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,7)
)
),
namedtype.NamedType('kup', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,8)
)
),
namedtype.NamedType('krr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,9)
)
),
namedtype.NamedType('krp', KeyRecRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,10)
)
),
namedtype.NamedType('rr', RevReqContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,11)
)
),
namedtype.NamedType('rp', RevRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,12)
)
),
namedtype.NamedType('ccr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,13)
)
),
namedtype.NamedType('ccp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,14)
)
),
namedtype.NamedType('ckuann', CAKeyUpdAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,15)
)
),
namedtype.NamedType('cann', CertAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,16)
)
),
namedtype.NamedType('rann', RevAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,17)
)
),
namedtype.NamedType('crlann', CRLAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,18)
)
),
namedtype.NamedType('pkiconf', PKIConfirmContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,19)
)
),
namedtype.NamedType('nested', nestedMessageContent),
# namedtype.NamedType('nested', NestedMessageContent().subtype(
# explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
# )
# ),
namedtype.NamedType('genm', GenMsgContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,21)
)
)
)
class PKIHeader(univ.Sequence):
"""
PKIHeader ::= SEQUENCE {
pvno INTEGER { cmp1999(1), cmp2000(2) },
sender GeneralName,
recipient GeneralName,
messageTime [0] GeneralizedTime OPTIONAL,
protectionAlg [1] AlgorithmIdentifier OPTIONAL,
senderKID [2] KeyIdentifier OPTIONAL,
recipKID [3] KeyIdentifier OPTIONAL,
transactionID [4] OCTET STRING OPTIONAL,
senderNonce [5] OCTET STRING OPTIONAL,
recipNonce [6] OCTET STRING OPTIONAL,
freeText [7] PKIFreeText OPTIONAL,
generalInfo [8] SEQUENCE SIZE (1..MAX) OF
InfoTypeAndValue OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('pvno', univ.Integer(
namedValues=namedval.NamedValues(
('cmp1999', 1),
('cmp2000', 2)
)
)
),
namedtype.NamedType('sender', rfc2459.GeneralName()),
namedtype.NamedType('recipient', rfc2459.GeneralName()),
namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
        # [8] and the SIZE constraint apply to the SEQUENCE OF, not to each
        # InfoTypeAndValue (cf. the extraCerts field of PKIMessage below).
        namedtype.OptionalNamedType('generalInfo',
            univ.SequenceOf(
                componentType=InfoTypeAndValue()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
            )
        )
)
class ProtectedPart(univ.Sequence):
"""
ProtectedPart ::= SEQUENCE {
header PKIHeader,
body PKIBody
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('body', PKIBody())
)
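# Hedged aside (not part of the original module): per RFC 4210 section 5.1.3,
# the PKIProtection value is computed over the DER encoding of ProtectedPart,
# i.e. the message header and body without the protection and extraCerts
# fields. A minimal sketch, assuming a populated PKIMessage `msg` and a
# pre-shared MAC key `key` (both hypothetical here):
#
#   from pyasn1.codec.der import encoder
#   import hashlib, hmac
#
#   protected = ProtectedPart()
#   protected.setComponentByName('header', msg.getComponentByName('header'))
#   protected.setComponentByName('body', msg.getComponentByName('body'))
#   mac = hmac.new(key, encoder.encode(protected), hashlib.sha1).digest()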
class PKIMessage(univ.Sequence):
"""
PKIMessage ::= SEQUENCE {
header PKIHeader,
body PKIBody,
protection [0] PKIProtection OPTIONAL,
extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
namedtype.NamedType('body', PKIBody()),
namedtype.OptionalNamedType('protection', PKIProtection().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('extraCerts',
univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class PKIMessages(univ.SequenceOf):
"""
PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
"""
componentType = PKIMessage()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
NestedMessageContent.componentType = PKIMessages()
nestedMessageContent.componentType = PKIMessages()
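if __name__ == '__main__':
    # Hedged smoke test (not part of the RFC 4210 module itself): round-trip a
    # trivial value through the DER codec to check that the definitions above
    # import and encode cleanly. Only names defined in this module are used.
    from pyasn1.codec.der import decoder, encoder
    der = encoder.encode(id_PasswordBasedMac)
    oid, rest = decoder.decode(der, asn1Spec=univ.ObjectIdentifier())
    assert oid == id_PasswordBasedMac and not rest
    print('id-PasswordBasedMac round-trips: %s' % (oid,))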
|
bsd-3-clause
|
alexhersh/calico-docker
|
calicoctl/tests/unit/node_test.py
|
1
|
30122
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from requests import Response
import signal
import os
import sys
from docker.errors import APIError
from docker import Client as DockerClient
from mock import patch, Mock, call
from nose_parameterized import parameterized
from pycalico.datastore import ETCD_AUTHORITY_DEFAULT
from calico_ctl import node
class TestAttachAndStream(unittest.TestCase):
@patch("calico_ctl.node.docker_client", spec=DockerClient)
@patch("calico_ctl.node.sys", spec=sys)
def test_container_stops_normally(self, m_sys, m_docker_client):
"""
Test _attach_and_stream when the container stops normally.
:return: None
"""
# attach(..., stream=True) returns a generator.
def container_output_gen():
yield ("Some output\n")
yield ("from the container.")
m_docker_client.attach.return_value = container_output_gen()
m_stdout = Mock(spec=sys.stdout)
m_sys.stdout = m_stdout
m_container = Mock()
node._attach_and_stream(m_container)
m_docker_client.attach.assert_called_once_with(m_container,
stream=True)
self.assertFalse(m_container.called)
m_stdout.write.assert_has_calls([call("Some output\n"),
call("from the container.")])
m_docker_client.stop.assert_called_once_with(m_container)
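    # Hedged aside (not part of the original suite): the generator above is the
    # usual way to fake a streaming Docker API with mock, since attach(...,
    # stream=True) hands back an iterable of chunks. The same pattern in
    # isolation, with all names hypothetical:
    #
    #   from mock import Mock
    #   def fake_stream():
    #       yield "chunk 1\n"
    #       yield "chunk 2\n"
    #   client = Mock()
    #   client.attach.return_value = fake_stream()
    #   assert "".join(client.attach(None, stream=True)) == "chunk 1\nchunk 2\n"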
@patch("calico_ctl.node.docker_client", spec=DockerClient)
@patch("calico_ctl.node.sys", spec=sys)
def test_ctrl_c(self, m_sys, m_docker_client):
"""
Test _attach_and_stream when a Keyboard interrupt is generated.
:return: None
"""
# attach(..., stream=True) returns a generator.
def container_output_gen():
yield ("Some output\n")
yield ("from the container.")
raise KeyboardInterrupt()
yield ("This output is not printed.")
m_docker_client.attach.return_value = container_output_gen()
m_stdout = Mock(spec=sys.stdout)
m_sys.stdout = m_stdout
m_container = Mock()
node._attach_and_stream(m_container)
m_docker_client.attach.assert_called_once_with(m_container,
stream=True)
self.assertFalse(m_container.called)
m_stdout.write.assert_has_calls([call("Some output\n"),
call("from the container.")])
self.assertEqual(m_stdout.write.call_count, 2)
m_docker_client.stop.assert_called_once_with(m_container)
@patch("calico_ctl.node.docker_client", spec=DockerClient)
@patch("calico_ctl.node.sys", spec=sys)
def test_killed(self, m_sys, m_docker_client):
"""
Test _attach_and_stream when killed by another process.
:return: None
"""
# attach(..., stream=True) returns a generator.
def container_output_gen():
yield ("Some output\n")
yield ("from the container.")
# Commit suicide, simulating being killed from another terminal.
os.kill(os.getpid(), signal.SIGTERM)
yield ("\nThis output is printed, but only because we nerf'd "
"sys.exit()")
m_docker_client.attach.return_value = container_output_gen()
m_stdout = Mock(spec=sys.stdout)
m_sys.stdout = m_stdout
m_container = Mock()
node._attach_and_stream(m_container)
m_docker_client.attach.assert_called_once_with(m_container,
stream=True)
self.assertFalse(m_container.called)
m_sys.exit.assert_called_once_with(0)
m_stdout.write.assert_has_calls([call("Some output\n"),
call("from the container."),
call("\nThis output is printed, but "
"only because we nerf'd "
"sys.exit()")])
# Stop gets called twice, once for SIGTERM, and because sys.exit() gets
# mocked, the function continues and we get another call when the
# generator ends normally.
m_docker_client.stop.assert_has_calls([call(m_container),
call(m_container)])
self.assertEqual(m_docker_client.stop.call_count, 2)
class TestNode(unittest.TestCase):
@parameterized.expand([
({'--ip': '127.a.0.1'}, True),
({'--ip': 'aa:bb::cc'}, True),
({'--ip': '127.0.0.1', '--ip6': '127.0.0.1'}, True),
({'--ip': '127.0.0.1', '--ip6': 'aa:bb::zz'}, True),
({'<AS_NUM>': None}, False),
({'<AS_NUM>': '65535.65535'}, False),
({'<AS_NUM>': '0.65535'}, False),
({'<AS_NUM>': '1000000'}, False),
({'<AS_NUM>': '65535'}, False),
({'<AS_NUM>': '65536.0'}, True),
({'<AS_NUM>': '65535.65536'}, True),
({'<AS_NUM>': '65535.'}, True)
])
def test_validate_arguments(self, case, sys_exit_called):
"""
Test validate_arguments for calicoctl node command
"""
with patch('sys.exit', autospec=True) as m_sys_exit:
# Call method under test
node.validate_arguments(case)
# Assert that method exits on bad input
self.assertEqual(m_sys_exit.called, sys_exit_called)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
@patch('calico_ctl.node.docker', autospec=True)
@patch('calico_ctl.node._find_or_pull_node_image', autospec=True)
@patch('calico_ctl.node._attach_and_stream', autospec=True)
def test_node_dockerless_start(self, m_attach_and_stream,
m_find_or_pull_node_image, m_docker,
m_docker_client, m_client, m_install_plugin,
m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists):
"""
Test that the node_start function performs all necessary configurations
without making Docker calls when runtime=none.
"""
# Set up mock objects
m_os_path_exists.return_value = False
ip_1 = '1.1.1.1'
ip_2 = '2.2.2.2'
m_get_host_ips.return_value = [ip_1, ip_2]
m_docker.utils.create_host_config.return_value = 'host_config'
container = {'Id': 666}
m_docker_client.create_container.return_value = container
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'none'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = True
kube_plugin_version = 'v0.2.1'
rkt = True
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
# Set up variables used in assertion statements
environment = [
"HOSTNAME=%s" % node.hostname,
"IP=%s" % ip_2,
"IP6=%s" % ip6,
"ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"FELIX_ETCDADDR=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"CALICO_NETWORKING=%s" % node.CALICO_NETWORKING_DEFAULT,
]
binds = {
log_dir:
{
"bind": "/var/log/calico",
"ro": False
}
}
# Assert
m_os_path_exists.assert_called_once_with(log_dir)
m_os_makedirs.assert_called_once_with(log_dir)
m_check_system.assert_called_once_with(quit_if_error=False,
libnetwork=libnetwork,
check_docker=False)
m_setup_ip.assert_called_once_with()
m_get_host_ips.assert_called_once_with(exclude=["^docker.*", "^cbr.*"])
m_warn_if_unknown_ip.assert_called_once_with(ip_2, ip6)
m_warn_if_hostname_conflict.assert_called_once_with(ip_2)
m_client.get_ip_pools.assert_has_calls([call(4), call(6)])
m_client.ensure_global_config.assert_called_once_with()
m_client.create_host.assert_called_once_with(
node.hostname, ip_2, ip6, as_num
)
url = node.KUBERNETES_BINARY_URL % kube_plugin_version
m_install_plugin.assert_has_calls([call(node.KUBERNETES_PLUGIN_DIR, url),
call(node.RKT_PLUGIN_DIR, node.RKT_BINARY_URL)])
self.assertFalse(m_docker_client.remove_container.called)
self.assertFalse(m_docker.utils.create_host_config.called)
self.assertFalse(m_find_or_pull_node_image.called)
self.assertFalse(m_docker_client.create_container.called)
self.assertFalse(m_docker_client.start.called)
self.assertFalse(m_attach_and_stream.called)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
@patch('calico_ctl.node.docker', autospec=True)
@patch('calico_ctl.node._find_or_pull_node_image', autospec=True)
@patch('calico_ctl.node._attach_and_stream', autospec=True)
def test_node_start(self, m_attach_and_stream,
m_find_or_pull_node_image, m_docker,
m_docker_client, m_client, m_install_plugin,
m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists):
"""
        Test that the node_start function starts the calico-node container,
        making the expected Docker calls, when runtime='docker'.
"""
# Set up mock objects
m_os_path_exists.return_value = False
ip_1 = '1.1.1.1'
ip_2 = '2.2.2.2'
m_get_host_ips.return_value = [ip_1, ip_2]
m_docker.utils.create_host_config.return_value = 'host_config'
container = {'Id': 666}
m_docker_client.create_container.return_value = container
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = 'v0.2.1'
rkt = True
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
# Set up variables used in assertion statements
environment = [
"HOSTNAME=%s" % node.hostname,
"IP=%s" % ip_2,
"IP6=%s" % ip6,
"ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"FELIX_ETCDADDR=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"CALICO_NETWORKING=%s" % node.CALICO_NETWORKING_DEFAULT,
]
binds = {
log_dir:
{
"bind": "/var/log/calico",
"ro": False
}
}
# Assert
m_os_path_exists.assert_called_once_with(log_dir)
m_os_makedirs.assert_called_once_with(log_dir)
m_check_system.assert_called_once_with(quit_if_error=False,
libnetwork=libnetwork,
check_docker=True)
m_setup_ip.assert_called_once_with()
m_get_host_ips.assert_called_once_with(exclude=["^docker.*", "^cbr.*"])
m_warn_if_unknown_ip.assert_called_once_with(ip_2, ip6)
m_warn_if_hostname_conflict.assert_called_once_with(ip_2)
m_client.get_ip_pools.assert_has_calls([call(4), call(6)])
m_client.ensure_global_config.assert_called_once_with()
m_client.create_host.assert_called_once_with(
node.hostname, ip_2, ip6, as_num
)
url = node.KUBERNETES_BINARY_URL % kube_plugin_version
m_install_plugin.assert_has_calls([call(node.KUBERNETES_PLUGIN_DIR, url),
call(node.RKT_PLUGIN_DIR, node.RKT_BINARY_URL)])
m_docker_client.remove_container.assert_called_once_with(
'calico-node', force=True
)
m_docker.utils.create_host_config.assert_called_once_with(
privileged=True,
restart_policy={"Name": "always"},
network_mode="host",
binds=binds
)
m_find_or_pull_node_image.assert_called_once_with('node_image')
m_docker_client.create_container.assert_called_once_with(
node_image,
name='calico-node',
detach=True,
environment=environment,
host_config='host_config',
volumes=['/var/log/calico']
)
m_docker_client.start.assert_called_once_with(container)
m_attach_and_stream.assert_called_once_with(container)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
def test_node_start_call_backup_kube_directory(
self, m_install_plugin, m_warn_if_hostname_conflict,
m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists):
"""
        Test that node_start falls back to the backup kubernetes plugin
        directory when install_plugin cannot write to the default directory.
"""
# Set up mock objects
m_os_path_exists.return_value = True
m_get_host_ips.return_value = ['1.1.1.1']
m_install_plugin.side_effect = OSError
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = "node_image"
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = 'v0.2.1'
rkt = False
libnetwork = False
# Test expecting OSError exception
self.assertRaises(OSError, node.node_start,
node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
url = node.KUBERNETES_BINARY_URL % kube_plugin_version
m_install_plugin.assert_has_calls([
call(node.KUBERNETES_PLUGIN_DIR, url),
call(node.KUBERNETES_PLUGIN_DIR_BACKUP, url)
])
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
def test_node_start_call_backup_rkt_directory(
self, m_install_plugin, m_warn_if_hostname_conflict,
m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip,
m_check_system, m_os_makedirs, m_os_path_exists):
"""
        Test that node_start falls back to the backup rkt plugin directory
        when install_plugin cannot write to the default rkt directory.
"""
# Set up mock objects
m_os_path_exists.return_value = True
m_get_host_ips.return_value = ['1.1.1.1']
m_install_plugin.side_effect = OSError
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = "node_image"
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = None
rkt = True
libnetwork = False
# Test expecting OSError exception
self.assertRaises(OSError, node.node_start,
node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
m_install_plugin.assert_has_calls([
call(node.RKT_PLUGIN_DIR, node.RKT_BINARY_URL),
call(node.RKT_PLUGIN_DIR_BACKUP, node.RKT_BINARY_URL)
])
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_start_remove_container_error(
self, m_docker_client, m_client, m_install_plugin,
m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists):
"""
Test that the docker client raises an APIError when it fails to
remove a container.
"""
# Set up mock objects
err = APIError("Test error message", Response())
m_docker_client.remove_container.side_effect = err
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = 'v0.2.1'
rkt = False
libnetwork = False
# Testing expecting APIError exception
self.assertRaises(APIError, node.node_start,
node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
@patch('sys.exit', autospec=True)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_start_no_detected_ips(
self, m_docker_client, m_client, m_install_plugin,
m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists, m_sys_exit):
"""
        Test that the system exits when no ip is provided and host ips
        cannot be obtained
"""
# Set up mock objects
m_get_host_ips.return_value = []
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = 'v0.2.1'
rkt = False
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
# Assert
m_sys_exit.assert_called_once_with(1)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.install_plugin', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_start_create_default_ip_pools(
self, m_docker_client, m_client, m_install_plugin,
m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
m_get_host_ips, m_setup_ip, m_check_system,
m_os_makedirs, m_os_path_exists):
"""
        Test that the client creates default ipv4 and ipv6 pools when
        get_ip_pools returns an empty list during etcd setup
"""
# Set up mock objects
m_client.get_ip_pools.return_value = []
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
kube_plugin_version = 'v0.2.1'
rkt = False
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
kube_plugin_version, rkt, libnetwork)
# Assert
m_client.add_ip_pool.assert_has_calls([
call(4, node.DEFAULT_IPV4_POOL),
call(6, node.DEFAULT_IPV6_POOL)
])
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_stop(self, m_docker_client, m_client):
"""
        Test that the client stops the node when node_stop is called with
        endpoints present and the force flag set.
"""
# Call method under test
m_client.get_endpoints.return_value = [Mock()]
node.node_stop(True)
# Assert
m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
m_docker_client.stop.assert_has_calls([call('calico-node'),
call('calico-libnetwork')])
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_stop_endpoints(self, m_docker_client, m_client):
"""
        Test that the client does not stop the node when node_stop is called
        with endpoints present and the force flag not set.
"""
# Call method under test
m_client.get_endpoints.return_value = [Mock()]
self.assertRaises(SystemExit, node.node_stop, False)
# Assert
m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
self.assertEquals(m_docker_client.stop.call_count, 0)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_stop_error(self, m_docker_client, m_client):
"""
        Test node_stop raises an exception when the docker client cannot
        stop the node
"""
# Set up mock objects
m_client.get_endpoints.return_value = [Mock()]
err = APIError("Test error message", Response())
for sidee in ([None, err], [err, None]):
m_docker_client.stop.side_effect = sidee
# Call method under test expecting an exception
self.assertRaises(APIError, node.node_stop, True)
@patch('calico_ctl.node.remove_veth', autospec=True)
@patch('calico_ctl.node._container_running', autospec=True, return_value=False)
@patch('calico_ctl.node.client', autospec=True)
def test_node_remove(self, m_client, m_cont_running, m_veth):
"""
        Test that the client removes the host when node_remove is called,
        and that endpoints are removed when the remove_endpoints flag is set.
"""
# Call method under test
endpoint1 = Mock()
endpoint1.name = "vethname1"
endpoint2 = Mock()
endpoint2.name = "vethname2"
m_client.get_endpoints.return_value = [endpoint1, endpoint2]
node.node_remove(True)
# Assert
m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
m_client.remove_host.assert_called_once_with(node.hostname)
m_veth.assert_has_calls([call("vethname1"), call("vethname2")])
m_cont_running.assert_has_calls([call("calico-node"), call("calico-libnetwork")])
@patch('calico_ctl.node.remove_veth', autospec=True)
@patch('calico_ctl.node._container_running', autospec=True, return_value=True)
@patch('calico_ctl.node.client', autospec=True)
def test_node_remove_node_running(self, m_client, m_cont_running, m_veth):
"""
        Test that the client does not remove the host when containers are
        running and node_remove is invoked.
"""
# Assert
self.assertRaises(SystemExit, node.node_remove, True)
self.assertEquals(m_client.get_endpoints.call_count, 0)
self.assertEquals(m_client.remove_host.call_count, 0)
self.assertEquals(m_veth.call_count, 0)
@patch('calico_ctl.node.remove_veth', autospec=True)
@patch('calico_ctl.node._container_running', autospec=True, return_value=False)
@patch('calico_ctl.node.client', autospec=True)
def test_node_remove_endpoints_exist(self, m_client, m_cont_running, m_veth):
"""
        Test that the client does not remove the host when endpoints exist
        and node_remove is invoked without the remove_endpoints flag.
"""
# Call method under test
m_client.get_endpoints.return_value = [Mock()]
self.assertRaises(SystemExit, node.node_remove, False)
# Assert
m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
self.assertEquals(m_client.remove_host.call_count, 0)
self.assertEquals(m_veth.call_count, 0)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_no_cont(self, m_docker_client):
"""
Test the _container_running command when no container exists.
"""
response = Response()
response.status_code = 404
m_docker_client.inspect_container.side_effect = APIError("Test error message", response)
self.assertEquals(node._container_running("container1"), False)
m_docker_client.inspect_container.assert_called_once_with("container1")
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_err(self, m_docker_client):
"""
Test the _container_running command when the inspect command errors.
"""
response = Response()
response.status_code = 400
m_docker_client.inspect_container.side_effect = APIError("Test error message", response)
self.assertRaises(APIError, node._container_running, "container1")
m_docker_client.inspect_container.assert_called_once_with("container1")
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_cont_running(self, m_docker_client):
"""
        Test the _container_running command for both running and stopped
        containers.
"""
for test in (True, False):
m_docker_client.inspect_container.return_value = {"State": {"Running": test}}
self.assertEquals(node._container_running("container1"), test)
@patch("calico_ctl.node.URLGetter", autospec=True)
@patch("calico_ctl.node.os", autospec=True)
def test_install_plugin(self, m_os, m_url_getter):
# Test installation of Kubernetes plugin
url = "http://somefake/url"
path = "/path/to/downloaded/binary/"
# Run method
node.install_plugin(path, url)
# Assert the file URL was downloaded.
m_url_getter().retrieve.assert_called_once_with(url, path + "calico")
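# Hedged usage note (not part of the original file): the suite relies on
# nose_parameterized, so under the original repo's tooling it would typically
# be run with nose, e.g.:
#
#   nosetests calicoctl/tests/unit/node_test.py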
|
apache-2.0
|
aprefontaine/TMScheduler
|
django/utils/synch.py
|
376
|
2549
|
"""
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by [email protected])
"""
try:
import threading
except ImportError:
import dummy_threading as threading
class RWLock:
"""
Classic implementation of reader-writer lock with preference to writers.
Readers can access a resource simultaneously.
Writers get an exclusive access.
API is self-descriptive:
reader_enters()
reader_leaves()
writer_enters()
writer_leaves()
"""
def __init__(self):
self.mutex = threading.RLock()
self.can_read = threading.Semaphore(0)
self.can_write = threading.Semaphore(0)
self.active_readers = 0
self.active_writers = 0
self.waiting_readers = 0
self.waiting_writers = 0
def reader_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0:
self.active_readers += 1
self.can_read.release()
else:
self.waiting_readers += 1
finally:
self.mutex.release()
self.can_read.acquire()
def reader_leaves(self):
self.mutex.acquire()
try:
self.active_readers -= 1
if self.active_readers == 0 and self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
finally:
self.mutex.release()
def writer_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
self.active_writers += 1
self.can_write.release()
else:
self.waiting_writers += 1
finally:
self.mutex.release()
self.can_write.acquire()
def writer_leaves(self):
self.mutex.acquire()
try:
self.active_writers -= 1
if self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
elif self.waiting_readers != 0:
t = self.waiting_readers
self.waiting_readers = 0
self.active_readers += t
while t > 0:
self.can_read.release()
t -= 1
finally:
self.mutex.release()
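if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): RWLock has no
    # context-manager support, so callers pair each *_enters() with the
    # matching *_leaves() in try/finally themselves.
    lock = RWLock()
    lock.reader_enters()
    try:
        pass  # read the shared resource here
    finally:
        lock.reader_leaves()
    lock.writer_enters()
    try:
        pass  # mutate the shared resource here
    finally:
        lock.writer_leaves()
    print("lock acquired and released in both modes")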
|
bsd-3-clause
|
liangjiaxing/sympy
|
sympy/solvers/deutils.py
|
97
|
10013
|
"""Utility functions for classifying and solving
ordinary and partial differential equations.
Contains
========
_preprocess
ode_order
_desolve
"""
from __future__ import print_function, division
from sympy.core.function import Derivative, AppliedUndef
from sympy.core.relational import Equality
from sympy.core.symbol import Wild
def _preprocess(expr, func=None, hint='_Integral'):
"""Prepare expr for solving by making sure that differentiation
is done so that only func remains in unevaluated derivatives and
(if hint doesn't end with _Integral) that doit is applied to all
other derivatives. If hint is None, don't do any differentiation.
(Currently this may cause some simple differential equations to
fail.)
In case func is None, an attempt will be made to autodetect the
function to be solved for.
>>> from sympy.solvers.deutils import _preprocess
>>> from sympy import Derivative, Function, Integral, sin
>>> from sympy.abc import x, y, z
>>> f, g = map(Function, 'fg')
Apply doit to derivatives that contain more than the function
of interest:
>>> _preprocess(Derivative(f(x) + x, x))
(Derivative(f(x), x) + 1, f(x))
Do others if the differentiation variable(s) intersect with those
of the function of interest or contain the function of interest:
>>> _preprocess(Derivative(g(x), y, z), f(y))
(0, f(y))
>>> _preprocess(Derivative(f(y), z), f(y))
(0, f(y))
Do others if the hint doesn't end in '_Integral' (the default
assumes that it does):
>>> _preprocess(Derivative(g(x), y), f(x))
(Derivative(g(x), y), f(x))
>>> _preprocess(Derivative(f(x), y), f(x), hint='')
(0, f(x))
Don't do any derivatives if hint is None:
>>> eq = Derivative(f(x) + 1, x) + Derivative(f(x), y)
>>> _preprocess(eq, f(x), hint=None)
(Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))
If it's not clear what the function of interest is, it must be given:
>>> eq = Derivative(f(x) + g(x), x)
>>> _preprocess(eq, g(x))
(Derivative(f(x), x) + Derivative(g(x), x), g(x))
>>> try: _preprocess(eq)
... except ValueError: print("A ValueError was raised.")
A ValueError was raised.
"""
derivs = expr.atoms(Derivative)
if not func:
funcs = set().union(*[d.atoms(AppliedUndef) for d in derivs])
if len(funcs) != 1:
raise ValueError('The function cannot be '
'automatically detected for %s.' % expr)
func = funcs.pop()
fvars = set(func.args)
if hint is None:
return expr, func
reps = [(d, d.doit()) for d in derivs if not hint.endswith('_Integral') or
d.has(func) or set(d.variables) & fvars]
eq = expr.subs(reps)
return eq, func
def ode_order(expr, func):
"""
Returns the order of a given differential
equation with respect to func.
This function is implemented recursively.
Examples
========
>>> from sympy import Function
>>> from sympy.solvers.deutils import ode_order
>>> from sympy.abc import x
>>> f, g = map(Function, ['f', 'g'])
>>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
... f(x).diff(x), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
3
"""
a = Wild('a', exclude=[func])
if expr.match(a):
return 0
if isinstance(expr, Derivative):
if expr.args[0] == func:
return len(expr.variables)
else:
order = 0
for arg in expr.args[0].args:
order = max(order, ode_order(arg, func) + len(expr.variables))
return order
else:
order = 0
for arg in expr.args:
order = max(order, ode_order(arg, func))
return order
def _desolve(eq, func=None, hint="default", ics=None, simplify=True, **kwargs):
"""This is a helper function to dsolve and pdsolve in the ode
and pde modules.
If the hint provided to the function is "default", then a dict with
the following keys are returned
'func' - It provides the function for which the differential equation
has to be solved. This is useful when the expression has
more than one function in it.
'default' - The default key as returned by classifier functions in ode
and pde.py
'hint' - The hint given by the user for which the differential equation
is to be solved. If the hint given by the user is 'default',
then the value of 'hint' and 'default' is the same.
'order' - The order of the function as returned by ode_order
'match' - It returns the match as given by the classifier functions, for
the default hint.
If the hint provided to the function is not "default" and is not in
('all', 'all_Integral', 'best'), then a dict with the above mentioned keys
is returned along with the keys which are returned when dict in
classify_ode or classify_pde is set True
If the hint given is in ('all', 'all_Integral', 'best'), then this function
returns a nested dict, with the keys, being the set of classified hints
returned by classifier functions, and the values being the dict of form
as mentioned above.
Key 'eq' is a common key to all the above mentioned hints which returns an
expression if eq given by user is an Equality.
See Also
========
classify_ode(ode.py)
classify_pde(pde.py)
"""
prep = kwargs.pop('prep', True)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
# preprocess the equation and find func if not given
if prep or func is None:
eq, func = _preprocess(eq, func)
prep = False
# type is an argument passed by the solve functions in ode and pde.py
# that identifies whether the function caller is an ordinary
# or partial differential equation. Accordingly corresponding
# changes are made in the function.
type = kwargs.get('type', None)
xi = kwargs.get('xi')
eta = kwargs.get('eta')
x0 = kwargs.get('x0', 0)
terms = kwargs.get('n')
if type == 'ode':
from sympy.solvers.ode import classify_ode, allhints
classifier = classify_ode
string = 'ODE '
dummy = ''
elif type == 'pde':
from sympy.solvers.pde import classify_pde, allhints
classifier = classify_pde
string = 'PDE '
dummy = 'p'
# Magic that should only be used internally. Prevents classify_ode from
# being called more than it needs to be by passing its results through
# recursive calls.
if kwargs.get('classify', True):
hints = classifier(eq, func, dict=True, ics=ics, xi=xi, eta=eta,
n=terms, x0=x0, prep=prep)
else:
# Here is what all this means:
#
# hint: The hint method given to _desolve() by the user.
# hints: The dictionary of hints that match the DE, along with other
# information (including the internal pass-through magic).
# default: The default hint to return, the first hint from allhints
# that matches the hint; obtained from classify_ode().
# match: Dictionary containing the match dictionary for each hint
# (the parts of the DE for solving). When going through the
# hints in "all", this holds the match string for the current
# hint.
# order: The order of the DE, as determined by ode_order().
hints = kwargs.get('hint',
{'default': hint,
hint: kwargs['match'],
'order': kwargs['order']})
if hints['order'] == 0:
raise ValueError(
str(eq) + " is not a differential equation in " + str(func))
if not hints['default']:
# classify_ode will set hints['default'] to None if no hints match.
if hint not in allhints and hint != 'default':
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints['ordered_hints'] and hint != 'default':
raise ValueError(string + str(eq) + " does not match hint " + hint)
else:
raise NotImplementedError(dummy + "solve" + ": Cannot solve " + str(eq))
if hint == 'default':
return _desolve(eq, func, ics=ics, hint=hints['default'], simplify=simplify,
prep=prep, x0=x0, classify=False, order=hints['order'],
match=hints[hints['default']], xi=xi, eta=eta, n=terms, type=type)
elif hint in ('all', 'all_Integral', 'best'):
retdict = {}
failedhints = {}
gethints = set(hints) - set(['order', 'default', 'ordered_hints'])
if hint == 'all_Integral':
for i in hints:
if i.endswith('_Integral'):
gethints.remove(i[:-len('_Integral')])
# special cases
for k in ["1st_homogeneous_coeff_best", "1st_power_series",
"lie_group", "2nd_power_series_ordinary", "2nd_power_series_regular"]:
if k in gethints:
gethints.remove(k)
for i in gethints:
sol = _desolve(eq, func, ics=ics, hint=i, x0=x0, simplify=simplify, prep=prep,
classify=False, n=terms, order=hints['order'], match=hints[i], type=type)
retdict[i] = sol
retdict['all'] = True
retdict['eq'] = eq
return retdict
elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints:
raise ValueError(string + str(eq) + " does not match hint " + hint)
else:
# Key added to identify the hint needed to solve the equation
hints['hint'] = hint
hints.update({'func': func, 'eq': eq})
return hints
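if __name__ == '__main__':
    # Hedged demo (not part of the original module): inspect the metadata dict
    # _desolve builds for a simple first-order ODE, called the way dsolve
    # would call it (type='ode' selects classify_ode as the classifier).
    from sympy import Function, Symbol
    x = Symbol('x')
    f = Function('f')
    info = _desolve(f(x).diff(x) - f(x), f(x), hint='separable', type='ode')
    print(info['hint'], info['order'])  # expected: separable 1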
|
bsd-3-clause
|
HonzaKral/django
|
tests/utils_tests/test_timesince.py
|
44
|
5931
|
from __future__ import unicode_literals
import datetime
import unittest
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.timesince import timesince, timeuntil
class TimesinceTests(unittest.TestCase):
def setUp(self):
self.t = datetime.datetime(2007, 8, 14, 13, 46, 0)
self.onemicrosecond = datetime.timedelta(microseconds=1)
self.onesecond = datetime.timedelta(seconds=1)
self.oneminute = datetime.timedelta(minutes=1)
self.onehour = datetime.timedelta(hours=1)
self.oneday = datetime.timedelta(days=1)
self.oneweek = datetime.timedelta(days=7)
self.onemonth = datetime.timedelta(days=30)
self.oneyear = datetime.timedelta(days=365)
def test_equal_datetimes(self):
""" equal datetimes. """
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(timesince(self.t, self.t), '0\xa0minutes')
def test_ignore_microseconds_and_seconds(self):
""" Microseconds and seconds are ignored. """
self.assertEqual(timesince(self.t, self.t + self.onemicrosecond),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t + self.onesecond),
'0\xa0minutes')
def test_other_units(self):
""" Test other units. """
self.assertEqual(timesince(self.t, self.t + self.oneminute),
'1\xa0minute')
self.assertEqual(timesince(self.t, self.t + self.onehour), '1\xa0hour')
self.assertEqual(timesince(self.t, self.t + self.oneday), '1\xa0day')
self.assertEqual(timesince(self.t, self.t + self.oneweek), '1\xa0week')
self.assertEqual(timesince(self.t, self.t + self.onemonth),
'1\xa0month')
self.assertEqual(timesince(self.t, self.t + self.oneyear), '1\xa0year')
def test_multiple_units(self):
""" Test multiple units. """
self.assertEqual(timesince(self.t,
self.t + 2 * self.oneday + 6 * self.onehour), '2\xa0days, 6\xa0hours')
self.assertEqual(timesince(self.t,
self.t + 2 * self.oneweek + 2 * self.oneday), '2\xa0weeks, 2\xa0days')
def test_display_first_unit(self):
"""
If the two differing units aren't adjacent, only the first unit is
displayed.
"""
self.assertEqual(timesince(self.t,
self.t + 2 * self.oneweek + 3 * self.onehour + 4 * self.oneminute),
'2\xa0weeks')
self.assertEqual(timesince(self.t,
self.t + 4 * self.oneday + 5 * self.oneminute), '4\xa0days')
def test_display_second_before_first(self):
"""
When the second date occurs before the first, we should always
get 0 minutes.
"""
self.assertEqual(timesince(self.t, self.t - self.onemicrosecond),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onesecond),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneminute),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onehour),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneday),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneweek),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onemonth),
'0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneyear),
'0\xa0minutes')
self.assertEqual(timesince(self.t,
self.t - 2 * self.oneday - 6 * self.onehour), '0\xa0minutes')
self.assertEqual(timesince(self.t,
self.t - 2 * self.oneweek - 2 * self.oneday), '0\xa0minutes')
self.assertEqual(timesince(self.t,
self.t - 2 * self.oneweek - 3 * self.onehour - 4 * self.oneminute),
'0\xa0minutes')
self.assertEqual(timesince(self.t,
self.t - 4 * self.oneday - 5 * self.oneminute), '0\xa0minutes')
@requires_tz_support
def test_different_timezones(self):
""" When using two different timezones. """
now = datetime.datetime.now()
now_tz = timezone.make_aware(now, timezone.get_default_timezone())
now_tz_i = timezone.localtime(now_tz, timezone.get_fixed_timezone(195))
self.assertEqual(timesince(now), '0\xa0minutes')
self.assertEqual(timesince(now_tz), '0\xa0minutes')
self.assertEqual(timesince(now_tz_i), '0\xa0minutes')
self.assertEqual(timesince(now_tz, now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz, now_tz_i), '0\xa0minutes')
def test_date_objects(self):
""" Both timesince and timeuntil should work on date objects (#17937). """
today = datetime.date.today()
self.assertEqual(timesince(today + self.oneday), '0\xa0minutes')
self.assertEqual(timeuntil(today - self.oneday), '0\xa0minutes')
def test_both_date_objects(self):
""" Timesince should work with both date objects (#9672) """
today = datetime.date.today()
self.assertEqual(timeuntil(today + self.oneday, today), '1\xa0day')
self.assertEqual(timeuntil(today - self.oneday, today), '0\xa0minutes')
self.assertEqual(timeuntil(today + self.oneweek, today), '1\xa0week')
def test_naive_datetime_with_tzinfo_attribute(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
future = datetime.datetime(2080, 1, 1, tzinfo=naive())
self.assertEqual(timesince(future), '0\xa0minutes')
past = datetime.datetime(1980, 1, 1, tzinfo=naive())
self.assertEqual(timeuntil(past), '0\xa0minutes')
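# Hedged note (not part of the original file): timesince/timeuntil go through
# Django's translation machinery, so running this module standalone generally
# requires configured settings; inside the Django checkout it would normally
# be run through runtests.py rather than invoked directly.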
|
bsd-3-clause
|
Endika/hr
|
hr_payslip_ytd_amount/hr_payslip_line.py
|
27
|
1237
|
# -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
import openerp.addons.decimal_precision as dp
class hr_payslip_line(orm.Model):
_inherit = 'hr.payslip.line'
_columns = {
'total_ytd': fields.float(
'Total Year-to-date',
digits_compute=dp.get_precision('Payroll'))
}
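# Hedged aside (not part of the original module): dp.get_precision('Payroll')
# does not pin a fixed number of digits; it returns a callable the ORM
# evaluates per database, looking up the decimal.precision record named
# 'Payroll', so the column's precision follows the configured setting.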
|
agpl-3.0
|
VitalPet/odoo
|
addons/mail/res_partner.py
|
52
|
3485
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class res_partner_mail(osv.Model):
""" Update partner to add a field about notification preferences """
_name = "res.partner"
_inherit = ['res.partner', 'mail.thread']
_mail_flat_thread = False
_columns = {
'notification_email_send': fields.selection([
('none', 'Never'),
('email', 'Incoming Emails only'),
('comment', 'Incoming Emails and Discussions'),
('all', 'All Messages (discussions, emails, followed system notifications)'),
], 'Receive Messages by Email', required=True,
help="Policy to receive emails for new messages pushed to your personal Inbox:\n"
"- Never: no emails are sent\n"
"- Incoming Emails only: for messages received by the system via email\n"
"- Incoming Emails and Discussions: for incoming emails along with internal discussions\n"
"- All Messages: for every notification you receive in your Inbox"),
}
_defaults = {
'notification_email_send': lambda *args: 'comment'
}
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(res_partner_mail, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for partner in self.browse(cr, uid, ids, context=context):
self._message_add_suggested_recipient(cr, uid, recipients, partner, partner=partner, reason=_('Partner Profile'))
return recipients
def message_post(self, cr, uid, thread_id, **kwargs):
""" Override related to res.partner. In case of email message, set it as
private:
- add the target partner in the message partner_ids
- set thread_id as None, because this will trigger the 'private'
aspect of the message (model=False, res_id=False)
"""
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
if kwargs.get('type') == 'email':
partner_ids = kwargs.get('partner_ids', [])
if thread_id not in [command[1] for command in partner_ids]:
partner_ids.append((4, thread_id))
kwargs['partner_ids'] = partner_ids
thread_id = False
return super(res_partner_mail, self).message_post(cr, uid, thread_id, **kwargs)
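# Hedged aside (not part of the original file): the (4, thread_id) tuple used
# in message_post above is the ORM's x2many "link" command -- link an existing
# record without creating or unlinking anything -- which is how the target
# partner is added to the message's recipient list.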
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kavardak/suds
|
suds/options.py
|
32
|
5358
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Suds basic options classes.
"""
from suds.properties import *
from suds.wsse import Security
from suds.xsd.doctor import Doctor
from suds.transport import Transport
from suds.cache import Cache, NoCache
class TpLinker(AutoLinker):
"""
    Transport (auto) linker used to manage linkage between a transport
    object's Properties and the Properties that contain them.
"""
def updated(self, properties, prev, next):
if isinstance(prev, Transport):
tp = Unskin(prev.options)
properties.unlink(tp)
if isinstance(next, Transport):
tp = Unskin(next.options)
properties.link(tp)
class Options(Skin):
"""
Options:
    - B{cache} - The XML document cache. May be set to None for no caching.
- type: L{Cache}
- default: L{NoCache}
    - B{faults} - Raise faults raised by the server; otherwise return a
        tuple from the service method invocation as (httpcode, object).
- type: I{bool}
- default: True
- B{service} - The default service name.
- type: I{str}
- default: None
    - B{port} - The default service port name (not the tcp port).
- type: I{str}
- default: None
- B{location} - This overrides the service port address I{URL} defined
in the WSDL.
- type: I{str}
- default: None
- B{transport} - The message transport.
- type: L{Transport}
- default: None
- B{soapheaders} - The soap headers to be included in the soap message.
- type: I{any}
- default: None
- B{wsse} - The web services I{security} provider object.
- type: L{Security}
- default: None
- B{doctor} - A schema I{doctor} object.
- type: L{Doctor}
- default: None
- B{xstq} - The B{x}ml B{s}chema B{t}ype B{q}ualified flag indicates
that the I{xsi:type} attribute values should be qualified by namespace.
- type: I{bool}
- default: True
- B{prefixes} - Elements of the soap message should be qualified (when needed)
using XML prefixes as opposed to xmlns="" syntax.
- type: I{bool}
- default: True
- B{retxml} - Flag that causes the I{raw} soap envelope to be returned instead
of the python object graph.
- type: I{bool}
- default: False
- B{prettyxml} - Flag that causes I{pretty} xml to be rendered when generating
the outbound soap envelope.
- type: I{bool}
- default: False
- B{autoblend} - Flag that ensures that the schema(s) defined within the
WSDL import each other.
- type: I{bool}
- default: False
- B{cachingpolicy} - The caching policy.
- type: I{int}
- 0 = Cache XML documents.
- 1 = Cache WSDL (pickled) object.
- default: 0
- B{plugins} - A plugin container.
- type: I{list}
- B{nosend} - Create the soap envelope but don't send.
When specified, method invocation returns a I{RequestContext}
instead of sending it.
- type: I{bool}
- default: False
"""
def __init__(self, **kwargs):
domain = __name__
definitions = [
Definition('cache', Cache, NoCache()),
Definition('faults', bool, True),
Definition('transport', Transport, None, TpLinker()),
Definition('service', (int, basestring), None),
Definition('port', (int, basestring), None),
Definition('location', basestring, None),
Definition('soapheaders', (), ()),
Definition('wsse', Security, None),
Definition('doctor', Doctor, None),
Definition('xstq', bool, True),
Definition('prefixes', bool, True),
Definition('retxml', bool, False),
Definition('prettyxml', bool, False),
Definition('autoblend', bool, False),
Definition('cachingpolicy', int, 0),
Definition('plugins', (list, tuple), []),
Definition('nosend', bool, False),
]
Skin.__init__(self, domain, definitions, kwargs)
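if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): Options validates
    # keyword arguments against the typed Definitions above, so misspelled
    # option names or wrongly-typed values raise immediately instead of being
    # silently ignored.
    opts = Options(faults=False, prettyxml=True)
    assert opts.faults is False
    assert opts.prettyxml is True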
|
lgpl-3.0
|
traveloka/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
|
12
|
34556
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
required: false
default: "present"
choices: ["present", "absent"]
nat_gateway_id:
description:
- The id AWS dynamically allocates to the NAT Gateway on creation.
        This is required when state=absent.
required: false
default: None
subnet_id:
description:
- The id of the subnet to create the NAT Gateway in. This is required
with the present option.
required: false
default: None
allocation_id:
description:
      - The id of the elastic IP allocation. If neither this nor the
        eip_address is passed, an EIP is generated for this NAT Gateway.
required: false
default: None
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
required: false
if_exist_do_not_create:
description:
      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
required: false
default: false
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
      - You should use this with the wait option, since you cannot release an address while a delete operation is happening.
required: false
default: true
wait:
description:
- Wait for operation to complete before returning.
required: false
default: false
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
required: false
default: 300
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
When specifying this option, ensure you specify the eip_address parameter
as well otherwise any subsequent runs will fail.
required: false
author:
- "Allen Sanabria (@linuxdynasty)"
- "Jon Hadfield (@jonhadfield)"
- "Karen Cheng(@Etherdaemon)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: yes
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: yes
register: delete_nat_gateway_result
with_items: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: yes
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: yes
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
  description: The ISO 8601 date-time when the NAT Gateway was created, in UTC.
  returned: In all cases.
  type: string
  sample: "2016-03-05T05:19:20.282000+00:00"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: string
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: string
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: string
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: string
sample: "vpc-12345"
nat_gateway_addresses:
  description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
  returned: In all cases.
  type: list
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import datetime
import random
import re
import time
from dateutil.tz import tzutc
DRY_RUN_GATEWAYS = [
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "available",
"create_time": "2016-03-05T05:19:20.282000+00:00",
"vpc_id": "vpc-12345678"
}
]
DRY_RUN_GATEWAY_UNCONVERTED = [
{
'VpcId': 'vpc-12345678',
'State': 'available',
'NatGatewayId': 'nat-123456789',
'SubnetId': 'subnet-123456789',
'NatGatewayAddresses': [
{
'PublicIp': '55.55.55.55',
'NetworkInterfaceId': 'eni-1234567',
'AllocationId': 'eipalloc-1234567',
'PrivateIp': '10.0.0.102'
}
],
'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
}
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
'Addresses': [
{
'PublicIp': '55.55.55.55',
'Domain': 'vpc',
'AllocationId': 'eipalloc-1234567'
}
]
}
DRY_RUN_MSGS = 'DryRun Mode:'
def convert_to_lower(data):
    """Convert all CamelCase keys in a dict to snake_case.
    Args:
        data (dict): Dictionary with CamelCase keys,
            e.g. FooBar becomes foo_bar.
            If a value is of type datetime.datetime, it is converted to
            an ISO 8601 string.
Basic Usage:
>>> test = {'FooBar': []}
>>> test = convert_to_lower(test)
{
'foo_bar': []
}
Returns:
Dictionary
"""
results = dict()
if isinstance(data, dict):
for key, val in data.items():
key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
if key[0] == '_':
key = key[1:]
if isinstance(val, datetime.datetime):
results[key] = val.isoformat()
elif isinstance(val, dict):
results[key] = convert_to_lower(val)
elif isinstance(val, list):
converted = list()
for item in val:
converted.append(convert_to_lower(item))
results[key] = converted
else:
results[key] = val
return results
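# For example (hypothetical input), a boto3-style response such as
#   {'NatGatewayId': 'nat-123', 'CreateTime': datetime.datetime(2016, 3, 5)}
# converts to
#   {'nat_gateway_id': 'nat-123', 'create_time': '2016-03-05T00:00:00'}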
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
states=None, check_mode=False):
"""Retrieve a list of NAT Gateways
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
subnet_id (str): The subnet_id the nat resides in.
nat_gateway_id (str): The Amazon nat id.
states (list): States available (pending, failed, available, deleting, and deleted)
default=None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> get_nat_gateways(client, subnet_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345678"
        }
    ]
Returns:
Tuple (bool, str, list)
"""
params = dict()
err_msg = ""
gateways_retrieved = False
existing_gateways = list()
if not states:
states = ['available', 'pending']
if nat_gateway_id:
params['NatGatewayIds'] = [nat_gateway_id]
else:
params['Filter'] = [
{
'Name': 'subnet-id',
'Values': [subnet_id]
},
{
'Name': 'state',
'Values': states
}
]
try:
if not check_mode:
gateways = client.describe_nat_gateways(**params)['NatGateways']
if gateways:
for gw in gateways:
existing_gateways.append(convert_to_lower(gw))
gateways_retrieved = True
else:
gateways_retrieved = True
if nat_gateway_id:
if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
existing_gateways = DRY_RUN_GATEWAYS
elif subnet_id:
if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
existing_gateways = DRY_RUN_GATEWAYS
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
check_mode=False):
"""Wait for the NAT Gateway to reach a status
Args:
client (botocore.client.EC2): Boto3 client
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
nat_gateway_id (str): The Amazon nat id.
status (str): The status to wait for.
examples. status=available, status=deleted
Basic Usage:
>>> client = boto3.client('ec2')
        >>> wait_timeout = 500
        >>> nat_gateway_id = 'nat-123456789'
        >>> wait_for_status(client, wait_timeout, nat_gateway_id, 'deleted')
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-12345678"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345677"
}
]
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
nat_gateway = dict()
states = ['pending', 'failed', 'available', 'deleting', 'deleted']
err_msg = ""
while wait_timeout > time.time():
try:
gws_retrieved, err_msg, nat_gateways = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if gws_retrieved and nat_gateways:
nat_gateway = nat_gateways[0]
if check_mode:
nat_gateway['state'] = status
if nat_gateway.get('state') == status:
status_achieved = True
break
elif nat_gateway.get('state') == 'failed':
err_msg = nat_gateway.get('failure_message')
break
elif nat_gateway.get('state') == 'pending':
if 'failure_message' in nat_gateway:
err_msg = nat_gateway.get('failure_message')
status_achieved = False
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
    if not status_achieved and not err_msg:
        err_msg = "Wait timeout reached while waiting for results"
return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
check_mode=False):
"""Retrieve all NAT Gateways for a subnet.
    Args:
        client (botocore.client.EC2): Boto3 client
        subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
(
[
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
],
False
)
Returns:
Tuple (list, bool)
"""
allocation_id_exists = False
gateways = []
states = ['available', 'pending']
gws_retrieved, _, gws = (
get_nat_gateways(
client, subnet_id, states=states, check_mode=check_mode
)
)
if not gws_retrieved:
return gateways, allocation_id_exists
for gw in gws:
for address in gw['nat_gateway_addresses']:
if allocation_id:
if address.get('allocation_id') == allocation_id:
allocation_id_exists = True
gateways.append(gw)
else:
gateways.append(gw)
return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
    """Get the allocation ID of an EIP, given its public IP address.
Args:
client (botocore.client.EC2): Boto3 client
eip_address (str): The Elastic IP Address of the EIP.
Kwargs:
        check_mode (bool): if set to true, do not make the AWS API call and
            return simulated results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> eip_address = '52.87.29.36'
        >>> get_eip_allocation_id_by_address(client, eip_address)
        ('eipalloc-36014da3', '')
Returns:
Tuple (str, str)
"""
params = {
'PublicIps': [eip_address],
}
allocation_id = None
err_msg = ""
try:
if not check_mode:
allocations = client.describe_addresses(**params)['Addresses']
if len(allocations) == 1:
allocation = allocations[0]
else:
allocation = None
else:
dry_run_eip = (
DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
)
if dry_run_eip == eip_address:
allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
else:
allocation = None
if allocation:
if allocation.get('Domain') != 'vpc':
err_msg = (
"EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
.format(eip_address)
)
else:
allocation_id = allocation.get('AllocationId')
else:
err_msg = (
"EIP {0} does not exist".format(eip_address)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
    """Allocate a new VPC-scoped elastic IP address.
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
        check_mode (bool): if set to true, do not make the AWS API call and
            return simulated results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocate_eip_address(client)
        (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')
Returns:
        Tuple (bool, str, str)
"""
ip_allocated = False
new_eip = None
err_msg = ''
params = {
'Domain': 'vpc',
}
try:
if check_mode:
ip_allocated = True
random_numbers = (
''.join(str(x) for x in random.sample(range(0, 9), 7))
)
new_eip = 'eipalloc-{0}'.format(random_numbers)
else:
new_eip = client.allocate_address(**params)['AllocationId']
ip_allocated = True
err_msg = 'eipalloc id {0} created'.format(new_eip)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_allocated, err_msg, new_eip
def release_address(client, allocation_id, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
allocation_id (str): The eip Amazon identifier.
Kwargs:
        check_mode (bool): if set to true, do not make the AWS API call and
            return simulated results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocation_id = "eipalloc-123456"
>>> release_address(client, allocation_id)
True
Returns:
        Tuple (bool, str)
"""
err_msg = ''
if check_mode:
return True, ''
ip_released = False
params = {
'AllocationId': allocation_id,
}
try:
client.release_address(**params)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_released, err_msg
def create(client, subnet_id, allocation_id, client_token=None,
wait=False, wait_timeout=0, if_exist_do_not_create=False,
check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
allocation_id (str): The eip Amazon identifier.
Kwargs:
        if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
]
Returns:
        Tuple (bool, bool, str, dict)
"""
params = {
'SubnetId': subnet_id,
'AllocationId': allocation_id
}
request_time = datetime.datetime.utcnow()
changed = False
success = False
token_provided = False
err_msg = ""
if client_token:
token_provided = True
params['ClientToken'] = client_token
try:
if not check_mode:
result = client.create_nat_gateway(**params)["NatGateway"]
else:
result = DRY_RUN_GATEWAY_UNCONVERTED[0]
result['CreateTime'] = datetime.datetime.utcnow()
result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
result['SubnetId'] = subnet_id
success = True
changed = True
create_time = result['CreateTime'].replace(tzinfo=None)
if token_provided and (request_time > create_time):
changed = False
elif wait:
success, err_msg, result = (
wait_for_status(
client, wait_timeout, result['NatGatewayId'], 'available',
check_mode=check_mode
)
)
if success:
err_msg = (
'NAT gateway {0} created'.format(result['nat_gateway_id'])
)
except botocore.exceptions.ClientError as e:
        if "IdempotentParameterMismatch" in str(e):
err_msg = (
'NAT Gateway does not support update and token has already been provided'
)
else:
err_msg = str(e)
success = False
changed = False
result = None
return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
if_exist_do_not_create=False, wait=False, wait_timeout=0,
client_token=None, check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
eip_address (str): The Elastic IP Address of the EIP.
default = None
        if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-w4t12897'
>>> allocation_id = 'eipalloc-36014da3'
>>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
        Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ""
results = list()
if not allocation_id and not eip_address:
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
)
if len(existing_gateways) > 0 and if_exist_do_not_create:
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
else:
success, err_msg, allocation_id = (
allocate_eip_address(client, check_mode=check_mode)
)
if not success:
                return success, False, err_msg, dict()
elif eip_address or allocation_id:
if eip_address and not allocation_id:
allocation_id, err_msg = (
get_eip_allocation_id_by_address(
client, eip_address, check_mode=check_mode
)
)
if not allocation_id:
success = False
changed = False
return success, changed, err_msg, dict()
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(
client, subnet_id, allocation_id, check_mode=check_mode
)
)
if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
success, changed, err_msg, results = create(
client, subnet_id, allocation_id, client_token,
wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
)
return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
release_eip=False, check_mode=False):
"""Delete an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
nat_gateway_id (str): The Amazon nat id.
Kwargs:
wait (bool): Wait for the nat to be in the deleted state before returning.
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
Basic Usage:
>>> client = boto3.client('ec2')
>>> nat_gw_id = 'nat-03835afb6e31df79b'
>>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'NatGatewayId': nat_gateway_id
}
success = False
changed = False
err_msg = ""
results = list()
    states = ['pending', 'available']
    allocation_id = None
try:
exist, _, gw = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if exist and len(gw) == 1:
results = gw[0]
if not check_mode:
client.delete_nat_gateway(**params)
allocation_id = (
results['nat_gateway_addresses'][0]['allocation_id']
)
changed = True
success = True
err_msg = (
'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)
if wait:
status_achieved, err_msg, results = (
wait_for_status(
client, wait_timeout, nat_gateway_id, 'deleted',
check_mode=check_mode
)
)
if status_achieved:
err_msg = (
'NAT gateway {0} was deleted successfully'
.format(nat_gateway_id)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
    if release_eip and allocation_id:
eip_released, eip_err = (
release_address(client, allocation_id, check_mode)
)
if not eip_released:
err_msg = (
"{0}: Failed to release EIP {1}: {2}"
.format(err_msg, allocation_id, eip_err)
)
success = False
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
subnet_id=dict(type='str'),
eip_address=dict(type='str'),
allocation_id=dict(type='str'),
if_exist_do_not_create=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
release_eip=dict(type='bool', default=False),
nat_gateway_id=dict(type='str'),
client_token=dict(type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['allocation_id', 'eip_address']
]
)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
state = module.params.get('state').lower()
check_mode = module.check_mode
subnet_id = module.params.get('subnet_id')
allocation_id = module.params.get('allocation_id')
eip_address = module.params.get('eip_address')
nat_gateway_id = module.params.get('nat_gateway_id')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
release_eip = module.params.get('release_eip')
client_token = module.params.get('client_token')
if_exist_do_not_create = module.params.get('if_exist_do_not_create')
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Boto3 Client Error - " + str(e))
changed = False
err_msg = ''
if state == 'present':
if not subnet_id:
module.fail_json(msg='subnet_id is required for creation')
success, changed, err_msg, results = (
pre_create(
client, subnet_id, allocation_id, eip_address,
if_exist_do_not_create, wait, wait_timeout,
client_token, check_mode=check_mode
)
)
else:
if not nat_gateway_id:
module.fail_json(msg='nat_gateway_id is required for removal')
else:
success, changed, err_msg, results = (
remove(
client, nat_gateway_id, wait, wait_timeout, release_eip,
check_mode=check_mode
)
)
if not success:
module.fail_json(
msg=err_msg, success=success, changed=changed
)
else:
module.exit_json(
msg=err_msg, success=success, changed=changed, **results
)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
hkariti/ansible
|
test/units/modules/network/nxos/nxos_module.py
|
46
|
3583
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
from units.modules.utils import set_module_args as _set_module_args
def set_module_args(args):
if 'provider' not in args:
args['provider'] = {'transport': args.get('transport') or 'cli'}
return _set_module_args(args)
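# For example, set_module_args({'commands': ['show version']}) injects
# {'provider': {'transport': 'cli'}} before delegating to the shared helper,
# so individual test cases do not have to supply connection details.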
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(module_name, name, device=''):
path = os.path.join(fixture_path, module_name, device, name)
if not os.path.exists(path):
path = os.path.join(fixture_path, module_name, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
    except ValueError:
pass
fixture_data[path] = data
return data
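# Lookup order sketch (hypothetical names): load_fixture('nxos_command',
# 'show_version.txt', device='n5k') tries
# fixtures/nxos_command/n5k/show_version.txt first and falls back to
# fixtures/nxos_command/show_version.txt; parsed results are cached per path.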
class TestNxosModule(ModuleTestCase):
def execute_module_devices(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
module_name = self.module.__name__.rsplit('.', 1)[1]
local_fixture_path = os.path.join(fixture_path, module_name)
models = []
for path in os.listdir(local_fixture_path):
path = os.path.join(local_fixture_path, path)
if os.path.isdir(path):
models.append(os.path.basename(path))
if not models:
models = ['']
retvals = {}
for model in models:
retvals[model] = self.execute_module(failed, changed, commands, sort, device=model)
return retvals
def execute_module(self, failed=False, changed=False, commands=None, sort=True, device=''):
self.load_fixtures(commands, device=device)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None, device=''):
pass
|
gpl-3.0
|
ryfeus/lambda-packs
|
Tensorflow_LightGBM_Scipy_nightly/source/numpy/distutils/npy_pkg_config.py
|
66
|
13243
|
from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser, NoOptionError
else:
from configparser import RawConfigParser, NoOptionError
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
"""
Exception thrown when there is a problem parsing a configuration file.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class PkgNotFound(IOError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
flags = (' ' + line).split(' -')
for flag in flags:
flag = '-' + flag
if len(flag) > 0:
if flag.startswith('-I'):
d['include_dirs'].append(flag[2:].strip())
elif flag.startswith('-L'):
d['library_dirs'].append(flag[2:].strip())
elif flag.startswith('-l'):
d['libraries'].append(flag[2:].strip())
elif flag.startswith('-D'):
d['macros'].append(flag[2:].strip())
else:
d['ignored'].append(flag)
return d
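# A small worked example (made-up flags):
#     d = parse_flags('-I/usr/include -L/usr/lib -lm -DNDEBUG')
# yields d['include_dirs'] == ['/usr/include'], d['library_dirs'] == ['/usr/lib'],
# d['libraries'] == ['m'] and d['macros'] == ['NDEBUG']; unrecognised tokens
# (including the empty leading one) end up in d['ignored'].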
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
"""
Object containing build information about a library.
Parameters
----------
name : str
The library name.
description : str
Description of the library.
version : str
Version string.
sections : dict
The sections of the configuration file for the library. The keys are
the section headers, the values the text under each header.
vars : class instance
A `VariableSet` instance, which contains ``(name, value)`` pairs for
variables defined in the configuration file for the library.
requires : sequence, optional
The required libraries for the library to be installed.
Notes
-----
All input parameters (except "sections" which is a method) are available as
attributes of the same name.
"""
def __init__(self, name, description, version, sections, vars, requires=None):
self.name = name
self.description = description
if requires:
self.requires = requires
else:
self.requires = []
self.version = version
self._sections = sections
self.vars = vars
def sections(self):
"""
Return the section headers of the config file.
Parameters
----------
None
Returns
-------
keys : list of str
The list of section headers.
"""
return list(self._sections.keys())
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
return _escape_backslash(val)
def libs(self, section="default"):
val = self.vars.interpolate(self._sections[section]['libs'])
return _escape_backslash(val)
def __str__(self):
m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
m.append('Version: %s' % self.version)
return "\n".join(m)
class VariableSet(object):
"""
Container object for the variables defined in a config file.
`VariableSet` can be used as a plain dictionary, with the variable names
as keys.
Parameters
----------
d : dict
Dict of items in the "variables" section of the configuration file.
"""
def __init__(self, d):
self._raw_data = dict([(k, v) for k, v in d.items()])
self._re = {}
self._re_sub = {}
self._init_parse()
def _init_parse(self):
for k, v in self._raw_data.items():
self._init_parse_var(k, v)
def _init_parse_var(self, name, value):
self._re[name] = re.compile(r'\$\{%s\}' % name)
self._re_sub[name] = value
def interpolate(self, value):
# Brute force: we keep interpolating until there is no '${var}' anymore
# or until interpolated string is equal to input string
def _interpolate(value):
for k in self._re.keys():
value = self._re[k].sub(self._re_sub[k], value)
return value
while _VAR.search(value):
nvalue = _interpolate(value)
if nvalue == value:
break
value = nvalue
return value
def variables(self):
"""
Return the list of variable names.
Parameters
----------
None
Returns
-------
names : list of str
The names of all variables in the `VariableSet` instance.
"""
return list(self._raw_data.keys())
# Emulate a dict to set/get variables values
def __getitem__(self, name):
return self._raw_data[name]
def __setitem__(self, name, value):
self._raw_data[name] = value
self._init_parse_var(name, value)
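# Interpolation sketch (hypothetical variables):
#     vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
#     vs.interpolate('-L${libdir} -lnpymath')  # -> '-L/usr/lib -lnpymath'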
def parse_meta(config):
if not config.has_section('meta'):
        raise FormatError("No meta section found!")
d = {}
for name, value in config.items('meta'):
d[name] = value
    for k in ['name', 'description', 'version']:
        if k not in d:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                              "but not found" % k)
    if 'requires' not in d:
        d['requires'] = []
return d
def parse_variables(config):
if not config.has_section('variables'):
        raise FormatError("No variables section found!")
d = {}
for name, value in config.items("variables"):
d[name] = value
return VariableSet(d)
def parse_sections(config):
    # The original body referenced undefined names (dead code); reconstructed
    # here to mirror the inline section/requires parsing in parse_config below.
    secs = [s for s in config.sections() if s not in ('meta', 'variables')]
    sections = dict((s, dict(config.items(s))) for s in secs)
    requires = dict((s, config.get(s, 'requires'))
                    for s in secs if config.has_option(s, 'requires'))
    return sections, requires
def pkg_to_filename(pkg_name):
return "%s.ini" % pkg_name
def parse_config(filename, dirs=None):
if dirs:
filenames = [os.path.join(d, filename) for d in dirs]
else:
filenames = [filename]
config = RawConfigParser()
n = config.read(filenames)
    if len(n) < 1:
raise PkgNotFound("Could not find file(s) %s" % str(filenames))
# Parse meta and variables sections
meta = parse_meta(config)
vars = {}
if config.has_section('variables'):
for name, value in config.items("variables"):
vars[name] = _escape_backslash(value)
# Parse "normal" sections
    secs = [s for s in config.sections() if s not in ['meta', 'variables']]
sections = {}
requires = {}
for s in secs:
d = {}
if config.has_option(s, "requires"):
requires[s] = config.get(s, 'requires')
for name, value in config.items(s):
d[name] = value
sections[s] = d
return meta, vars, sections, requires
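# Roughly, the file layout this parser expects (hypothetical package):
#
#     [meta]
#     name = foo
#     description = A sample library
#     version = 0.1
#
#     [variables]
#     prefix = /usr/local
#
#     [default]
#     cflags = -I${prefix}/include
#     libs = -L${prefix}/lib -lfoo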
def _read_config_imp(filenames, dirs=None):
def _read_config(f):
meta, vars, sections, reqs = parse_config(f, dirs)
# recursively add sections and variables of required libraries
for rname, rvalue in reqs.items():
nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
# Update var dict for variables not in 'top' config file
for k, v in nvars.items():
                if k not in vars:
vars[k] = v
# Update sec dict
for oname, ovalue in nsections[rname].items():
if ovalue:
sections[rname][oname] += ' %s' % ovalue
return meta, vars, sections, reqs
meta, vars, sections, reqs = _read_config(filenames)
# FIXME: document this. If pkgname is defined in the variables section, and
# there is no pkgdir variable defined, pkgdir is automatically defined to
# the path of pkgname. This requires the package to be imported to work
    if 'pkgdir' not in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if pkgname not in sys.modules:
raise ValueError("You should import %s to get information on %s" %
(pkgname, meta["name"]))
mod = sys.modules[pkgname]
vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
return LibraryInfo(name=meta["name"], description=meta["description"],
version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
_CACHE = {}
def read_config(pkgname, dirs=None):
"""
Return library info for a package from its configuration file.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of directories - usually including
the NumPy base directory - where to look for npy-pkg-config files.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
misc_util.get_info, misc_util.get_pkg_info
Examples
--------
>>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
>>> type(npymath_info)
<class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
>>> print(npymath_info)
Name: npymath
Description: Portable, core math library implementing C99 standard
Requires:
Version: 0.1 #random
"""
try:
return _CACHE[pkgname]
except KeyError:
v = _read_config_imp(pkg_to_filename(pkgname), dirs)
_CACHE[pkgname] = v
return v
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
import sys
from optparse import OptionParser
import glob
parser = OptionParser()
parser.add_option("--cflags", dest="cflags", action="store_true",
help="output all preprocessor and compiler flags")
parser.add_option("--libs", dest="libs", action="store_true",
help="output all linker flags")
parser.add_option("--use-section", dest="section",
help="use this section instead of default for options")
parser.add_option("--version", dest="version", action="store_true",
help="output version")
parser.add_option("--atleast-version", dest="min_version",
help="Minimal version")
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      help="List all packages found")
parser.add_option("--define-variable", dest="define_variable",
help="Replace variable with the given value")
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
        raise ValueError("Expected a package name on the command line")
if options.list_all:
files = glob.glob("*.ini")
for f in files:
info = read_config(f)
print("%s\t%s - %s" % (info.name, info.name, info.description))
pkg_name = args[1]
import os
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
else:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
if options.section:
section = options.section
else:
section = "default"
if options.define_variable:
m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
raise ValueError("--define-variable option should be of " \
"the form --define-variable=foo=bar")
else:
name = m.group(1)
value = m.group(2)
info.vars[name] = value
if options.cflags:
print(info.cflags(section))
if options.libs:
print(info.libs(section))
if options.version:
print(info.version)
if options.min_version:
print(info.version >= options.min_version)
|
mit
|
Lektorium-LLC/edx-platform
|
lms/djangoapps/instructor/tests/test_enrollment_store_provider.py
|
10
|
2740
|
"""
Exercises tests on the base_store_provider file
"""
from django.test import TestCase
from lms.djangoapps.instructor.enrollment_report import AbstractEnrollmentReportProvider
from lms.djangoapps.instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
class BadImplementationAbstractEnrollmentReportProvider(AbstractEnrollmentReportProvider):
"""
Test implementation of EnrollmentProvider to assert that non-implementations of methods
raises the correct methods
"""
def get_user_profile(self, user_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_user_profile(user_id)
def get_enrollment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_enrollment_info(user, course_id)
def get_payment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_payment_info(user, course_id)
class TestBaseNotificationDataProvider(TestCase):
"""
Cover the EnrollmentReportProvider class
"""
def test_cannot_create_instance(self):
"""
EnrollmentReportProvider is an abstract class and we should not be able
to create an instance of it
"""
with self.assertRaises(TypeError):
# parent of the BaseEnrollmentReportProvider is EnrollmentReportProvider
super(BadImplementationAbstractEnrollmentReportProvider, self)
def test_get_provider(self):
"""
Makes sure we get an instance of the registered enrollment provider
"""
provider = PaidCourseEnrollmentReportProvider()
self.assertIsNotNone(provider)
self.assertTrue(isinstance(provider, PaidCourseEnrollmentReportProvider))
def test_base_methods_exceptions(self):
"""
Asserts that all base-methods on the EnrollmentProvider interface will throw
an NotImplementedError
"""
bad_provider = BadImplementationAbstractEnrollmentReportProvider()
with self.assertRaises(NotImplementedError):
bad_provider.get_enrollment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_payment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_user_profile(None)
|
agpl-3.0
|
felixc/transcribe
|
transcribe.py
|
1
|
12620
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2013–2015 Felix Crux <[email protected]>
# Released under the terms of the MIT (Expat) License (see LICENSE for details)
#
"""Generate static HTML from Django templates and YAML content."""
import argparse
import collections
import datetime
import imp
import json
import math
import os
import os.path
import shutil
import sys
import yaml
from django.conf import settings
import django.template
import django.template.loader
import django.utils.feedgenerator
DEFAULT_CONF = {
'context': {},
'content': os.path.join(os.getcwd(), 'content'),
'templates': os.path.join(os.getcwd(), 'templates'),
'output': os.path.join(os.getcwd(), 'out'),
'static': [],
'meta': {}
}
DEBUG = False # Ugly, but I really don't want to thread conf['debug'] through
# all the possible paths/layers that wind up at Context()
class RssFeed(django.utils.feedgenerator.Rss201rev2Feed):
"""Wrapper around Django's RSS feed class that uses transcribe's config."""
def __init__(self, root, items, config, *args, **kwargs):
super(RssFeed, self).__init__(
title=config['title'],
link=config['link'],
description=config['desc'],
feed_url=config['link'] + '/' + root + '/rss.xml',
*args, **kwargs)
for item in items:
self.add_item(
title=item[config['item_title']],
link=config['link'] + '/' + root + '/' + item['slug'],
unique_id=item['slug'],
unique_id_is_permalink=False,
pubdate=item[config['item_pub_date']],
description=(item[config['item_desc']] +
'<p><a href="' + config['link'] + '/' + root +
'/' + item['slug'] + '">Read more...</a></p>')
# TODO: Get rid of that hardcoded 'Read more' string.
)
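# A minimal sketch of the feed config this class reads; the keys come from
# the constructor above, the values here are hypothetical:
#
#     feed_conf = {'title': 'My Site', 'link': 'https://example.com',
#                  'desc': 'Recent posts', 'item_title': 'title',
#                  'item_pub_date': 'date', 'item_desc': 'summary'}
#     feed = RssFeed('blog', items, feed_conf)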
class Context(django.template.Context):
"""Custom Django template context that includes extra helper variables."""
_context = {}
def __init__(self, root, content):
content['root'] = root
super(Context, self).__init__(content)
def output_context_to_template(context, template_path, output_path):
"""Write the given context to the output path using a template.
The 'template_path' parameter may be a list of templates to look for."""
with open(output_path, 'w') as output:
        template_path = \
            template_path if isinstance(template_path, list) else [template_path]
template = django.template.loader.select_template(template_path)
output.write(template.render(context))
def output_item(item, item_root, output_root):
"""Transcribe the given item to HTML using the appropriate template."""
out_file_name = item['slug'] + '.html'
template = os.path.join(item_root, 'item.html')
specialized_template = os.path.join(item_root, out_file_name)
output_context_to_template(
Context(item_root, item),
[specialized_template, template],
os.path.join(output_root, out_file_name))
def output_archive(all_items, item_root, output_root, archive_by):
"""Arrange items into a date-based hierarchy and output to template."""
# TODO: Get rid of this gross copy/sort hack.
# TODO: Come up with a more sensible structure for the date hierarchy.
archive = collections.defaultdict(lambda: collections.defaultdict(list))
for item in all_items:
date = item[archive_by]
year = date.strftime('%Y')
month = datetime.date(date.year, date.month, 1)
archive[year][month].append(item)
sorted_archive = []
for year in archive:
sorted_months = []
for month in archive[year]:
sorted_months.append(
{'month': month, 'posts': archive[year][month]})
sorted_archive.append({'year': year, 'months': sorted_months})
sorted_archive[-1]['months'].sort(key=lambda v: v['month'])
sorted_archive.sort(key=lambda v: v['year'])
archive_root = os.path.join(output_root, 'archives')
os.mkdir(archive_root)
output_context_to_template(
Context(item_root,
{'all_items': all_items, item_root: sorted_archive}),
os.path.join(item_root, 'archive.html'),
os.path.join(archive_root, 'index.html'))
for year in set(item['year'] for item in sorted_archive):
year_root = os.path.join(archive_root, str(year))
year_items = [i for i in sorted_archive if i['year'] == year]
os.mkdir(year_root)
output_context_to_template(
Context(item_root,
{'all_items': all_items, item_root: year_items}),
os.path.join(item_root, 'archive.html'),
os.path.join(year_root, 'index.html'))
for month in set(item['month'] for item in year_items[0]['months']):
month_name = month.strftime('%b').lower()
month_root = os.path.join(year_root, month_name)
month_items = [i for i in year_items[0]['months']
if i['month'] == month]
os.mkdir(month_root)
output_context_to_template(
Context(item_root, {
'all_items': all_items,
item_root: [{'year': year, 'months': month_items}]}),
os.path.join(item_root, 'archive.html'),
os.path.join(month_root, 'index.html'))
def output_linkables(all_items, item_root, output_root, linkable_attrs):
"""Write lists of items according to their linkable attributes."""
# TODO: Shouldn't necessitate this kind of copy.
linkables = collections.defaultdict(lambda: collections.defaultdict(list))
for content in all_items:
for attr in linkable_attrs:
for attr_value in content[attr]:
linkables[attr][attr_value].append(content)
for attr in linkables:
attr_root = os.path.join(output_root, attr)
os.mkdir(attr_root)
for attr_value in linkables[attr]:
output_context_to_template(
Context(item_root, {
'context': 'Posts Tagged "' + attr_value + '"',
item_root.split('/')[-1]: linkables[attr][attr_value]}),
os.path.join(item_root, 'list.html'),
os.path.join(attr_root, attr_value + '.html'))
def output_feed(all_items, item_root, output_root, config):
"""Produce an RSS feed of the items."""
feed = RssFeed(item_root, all_items[:config['num_items']], config)
with open(os.path.join(output_root, 'rss.xml'), 'w') as out_file:
feed.write(out_file, 'utf-8')
def output_all(all_items, item_root, output_root, config):
"""Perform all of the templated output."""
for item in all_items:
output_item(item, item_root, output_root)
if item_root not in config:
return
if 'order_by' in config[item_root]:
all_items.sort(key=lambda v: v[config[item_root]['order_by']])
all_items.reverse()
if 'linkable_by' in config[item_root]:
output_linkables(all_items, item_root, output_root,
config[item_root]['linkable_by'])
if 'archive_by' in config[item_root]:
output_archive(all_items, item_root, output_root,
config[item_root]['archive_by'])
if 'feed' in config[item_root]:
output_feed(all_items, item_root, output_root,
config[item_root]['feed'])
num_per_page = config[item_root].get('num_per_page', len(all_items))
paginator = paginate(num_per_page, all_items)
for page_num, items in paginator:
output_context_to_template(
Context(item_root, {
'all_items': all_items,
'page': page_num,
'page_count': int(
math.ceil(float(len(all_items)) / num_per_page)),
item_root.split('/')[-1]: items}),
os.path.join(item_root, 'list.html'),
os.path.join(
output_root,
'index.html' if (page_num == 1) else str(page_num) + '.html'))
def paginate(size, items):
"""Generate (page-number, items) pairs given a page length and items."""
for i in range(0, len(items), size):
yield (int(i / size) + 1, items[i:i + size])
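# Usage sketch: with 25 items and size 10, paginate yields
# (1, items[0:10]), (2, items[10:20]) and (3, items[20:25]).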
def recreate_dir(path):
"""Delete and recreate the given directory path."""
shutil.rmtree(path, True)
os.mkdir(path)
def generate_config(argv):
"""Returns the configuration to use based on all sources."""
class AddToContextDictAction(argparse.Action):
"""Helper for adding parsed arguments to the context dictionary."""
def __call__(self, parser, namespace, values, option_string=None):
new_context = {}
if namespace.context:
new_context.update(namespace.context)
new_context[values[0]] = json.loads(values[1])
setattr(namespace, 'context', new_context)
config = DEFAULT_CONF
arg_parser = argparse.ArgumentParser(
description='Generate HTML from Django templates and YAML content.')
arg_parser.add_argument(
'-cx', '--context', nargs=2, action=AddToContextDictAction,
help='Extra context information to make available in your templates.')
arg_parser.add_argument(
'-i', '--content',
help='Directory containing YAML input content.')
arg_parser.add_argument(
'-t', '--templates',
help='Directory containing Django templates.')
arg_parser.add_argument(
'-o', '--output',
help='Directory to write output to. Note: Contents are not preserved.')
arg_parser.add_argument(
'-s', '--static', action='append',
help='Files/dirs that should be copied verbatim to output directory.')
arg_parser.add_argument(
'-c', '--config-file',
help='Full path to the configuration file to use.')
arg_conf = dict([(key, val) for key, val
in vars(arg_parser.parse_args(argv)).items()
if val is not None])
if 'config_file' in arg_conf:
config_dir = os.path.dirname(arg_conf['config_file'])
config_file = os.path.basename(arg_conf['config_file'])
if config_file[-3:] == '.py':
config_file = config_file[:-3]
file_handle, file_name, desc = imp.find_module(
config_file, [config_dir])
file_conf = imp.load_module(
'transcribe_config', file_handle, file_name, desc
).TRANSCRIBE_CONFIG
config.update(file_conf)
config.update(arg_conf)
Context._context.update(config['context'])
return config
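# A typical invocation (hypothetical paths), mirroring the flags above:
#     transcribe.py -i content -t templates -o out \
#         -cx site_name '"My Site"' -s static/css
# Note the JSON-encoded context value that AddToContextDictAction expects.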
def copy_static_content(sources, destination):
"""Perform a simple copy of files/dirs to the destination dir."""
for content in sources:
(shutil.copytree if os.path.isdir(content) else shutil.copy)(
content, os.path.join(destination, os.path.basename(content)))
def _timestamp_loader(loader, node):
"""Timezone-aware datetime loader for PyYAML."""
try:
return datetime.datetime.strptime(node.value, '%Y-%m-%dT%H:%M:%S%z')
except ValueError:
pass
dt = datetime.datetime.strptime(node.value, '%Y-%m-%dT%H:%M:%SZ')
return dt.replace(tzinfo=datetime.timezone.utc)
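# For example, a scalar like 2015-06-01T12:00:00Z parses to
# datetime.datetime(2015, 6, 1, 12, 0, tzinfo=datetime.timezone.utc),
# while an explicit offset such as 2015-06-01T12:00:00+0200 is preserved.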
def main(argv):
"""Transcribe YAML content into HTML through Django templates."""
conf = generate_config(argv)
settings.configure(
TEMPLATE_DIRS=(conf['templates'], ),
TEMPLATE_LOADERS=(
('django.template.loaders.cached.Loader',
('django.template.loaders.filesystem.Loader', )), )
)
import django.contrib.syndication.views # Requires Django to be configured
django.setup()
yaml.add_constructor('tag:yaml.org,2002:timestamp', _timestamp_loader)
recreate_dir(conf['output'])
copy_static_content(conf['static'], conf['output'])
for root, _, files in os.walk(conf['content']):
all_items = []
item_root = os.path.relpath(root, conf['content'])
output_root = os.path.join(conf['output'], item_root)
if item_root != '.':
os.mkdir(output_root)
        for file_name in files:
            with open(os.path.join(root, file_name)) as content_file:
                content = yaml.load(content_file)
content['slug'] = os.path.splitext(file_name)[0]
all_items.append(content)
if all_items:
output_all(all_items, item_root, output_root, conf['meta'])
if __name__ == '__main__':
exit(main(sys.argv[1:]))
|
mit
|
lucyparsons/OpenOversight
|
flickrscraper/flickrgroup.py
|
1
|
2788
|
from __future__ import print_function
from builtins import input
# creates a folder in the current directory and downloads pics and csv into it.
# need to fix last loop so very last entry doesn't generate error
import flickrapi
import wgetter
import time
import io
import os
api_key = ''
secret = ''
flickr = flickrapi.FlickrAPI(api_key, secret, format='parsed-json')
os.system('clear')
group_url = input("Enter a Flickr Group URL: ")
group_id = group_url.strip('/').split('/')[-1]
print(" ")
print("Files will be saved in folder " + group_id)
time.sleep(1)
print("Retrieving IDs. Please wait.")
group_pool_photos = []
page = 1
perpage = 300
success = True
# create folder if necessary
if not os.path.exists(group_id):
os.makedirs(group_id)
with io.FileIO(group_id + "/" + "list.csv", "w") as file:
while True:
response = flickr.groups.pools.getPhotos(group_id=group_id, page=page, perpage=perpage)
if response['stat'] != 'ok':
print('Error occurred in flickr.groups.pools.getPhotos')
print(response)
success = False
break
if len(response['photos']['photo']) == 0:
break
group_pool_photos.extend(response['photos']['photo'])
page += 1
if success:
print('Photos: {}'.format(len(group_pool_photos)))
time.sleep(1)
print('Downloading now.')
print(" ")
file.write('PICID, PICURL, TAKEN, LOCATION, REALNAME, TITLE, DESCRIPTION, PATH_ALIAS')
file.write('\r\n')
for line in group_pool_photos:
photoinfo = flickr.photos.getInfo(photo_id=line['id'])
description = (photoinfo['photo']['description']['_content']).replace(",", "").encode("utf-8")
if not description:
description = 'na'
taken = photoinfo['photo']['dates']['taken']
path_alias = photoinfo['photo']['owner']['path_alias']
if not path_alias:
path_alias = 'na'
title = photoinfo['photo']['title']['_content'].replace(";", "").replace(",", "").encode("utf-8")
location = photoinfo['photo']['owner']['location'].replace(";", "").replace(",", "").encode("utf-8")
            realname = photoinfo['photo']['owner']['realname'].replace(";", "").replace(",", "").replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').encode("utf-8")
picsize = flickr.photos.getSizes(photo_id=line['id'])
picurl = (picsize['sizes']['size'][-1]['source'])
file.write(line['id'] + "," + picurl + "," + taken + "," + location + "," + realname + "," + title + "," + description + "," + path_alias)
file.write('\r\n')
filename = wgetter.download(picurl, outdir=group_id)
time.sleep(0.5)
|
gpl-3.0
|
luckielordie/conan
|
conans/test/command/package_test.py
|
2
|
10927
|
import unittest
from conans import tools
from conans.test.utils.tools import TestClient
import os
from conans.paths import CONANFILE
from conans.util.files import load, mkdir
from conans.test.utils.test_files import temp_folder
from parameterized import parameterized
class PackageLocalCommandTest(unittest.TestCase):
def package_with_destination_test(self):
client = TestClient()
def prepare_for_package(the_client):
the_client.save({"src/header.h": "contents"}, clean_first=True)
the_client.run("new lib/1.0 -s")
# don't need build method
tools.replace_in_file(os.path.join(client.current_folder, "conanfile.py"),
"def build",
"def skip_build")
the_client.run("install . --install-folder build")
mkdir(os.path.join(client.current_folder, "build2"))
# In current dir subdir
prepare_for_package(client)
client.run("package . --build-folder build2 --install-folder build --package-folder=subdir")
self.assertNotIn("package(): WARN: No files copied", client.out)
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "subdir")))
# In current dir subdir with conanfile path
prepare_for_package(client)
client.run("package ./conanfile.py --build-folder build2 --install-folder build --package-folder=subdir")
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "subdir")))
# Default path
prepare_for_package(client)
client.run("package . --build-folder build")
self.assertNotIn("package(): WARN: No files copied", client.out)
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "build", "package")))
# Default path with conanfile path
prepare_for_package(client)
client.run("package conanfile.py --build-folder build")
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "build", "package")))
# Abs path
prepare_for_package(client)
pf = os.path.join(client.current_folder, "mypackage/two")
client.run("package . --build-folder build --package-folder='%s'" % pf)
self.assertNotIn("package(): WARN: No files copied", client.out)
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "mypackage", "two")))
# Abs path with conanfile path
prepare_for_package(client)
pf = os.path.join(client.current_folder, "mypackage/two")
os.rename(os.path.join(client.current_folder, "conanfile.py"),
os.path.join(client.current_folder, "my_conanfile.py"))
client.run("package ./my_conanfile.py --build-folder build --package-folder='%s'" % pf)
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "mypackage", "two")))
def package_with_path_errors_test(self):
client = TestClient()
client.save({"conanfile.txt": "contents"}, clean_first=True)
# Path with conanfile.txt
error = client.run("package conanfile.txt --build-folder build2 --install-folder build",
ignore_error=True)
self.assertTrue(error)
self.assertIn("A conanfile.py is needed (not valid conanfile.txt)", client.out)
# Path with wrong conanfile path
error = client.run("package not_real_dir/conanfile.py --build-folder build2 --install-folder build",
ignore_error=True)
self.assertTrue(error)
self.assertIn("Conanfile not found: %s" % os.path.join(client.current_folder, "not_real_dir",
"conanfile.py"), client.out)
def package_with_reference_errors_test(self):
client = TestClient()
error = client.run("package MyLib/0.1@lasote/stable", ignore_error=True)
self.assertTrue(error)
self.assertIn("conan package' doesn't accept a reference anymore",
client.out)
def local_package_test(self):
client = TestClient()
conanfile_template = """
from conans import ConanFile
class MyConan(ConanFile):
def package(self):
self.copy(pattern="*.h", dst="include", src="include")
"""
client.save({"include/file.h": "foo",
CONANFILE: conanfile_template})
client.run("install .")
path = client.current_folder
client.run('package "%s"' % path)
package_folder = os.path.join(client.current_folder, "package")
content = load(os.path.join(package_folder, "include/file.h"))
self.assertEqual(content, "foo")
self.assertEqual(sorted(os.listdir(package_folder)),
sorted(["include", "conaninfo.txt", "conanmanifest.txt"]))
self.assertEqual(os.listdir(os.path.join(package_folder, "include")), ["file.h"])
@parameterized.expand([(False, False), (True, False), (True, True), (False, True)])
def local_package_build_test(self, default_folder, conanfile_path):
client = TestClient()
conanfile_template = """
from conans import ConanFile
class MyConan(ConanFile):
def package(self):
self.copy(pattern="*.h", dst="include", src="include")
self.copy(pattern="*.lib")
"""
client.save({"include/file.h": "foo",
"build/lib/mypkg.lib": "mylib",
CONANFILE: conanfile_template})
path = client.current_folder
client.current_folder = os.path.join(client.current_folder, "build")
client.run("install ..")
if default_folder:
package_folder = os.path.join(client.current_folder, "package")
path = "../conanfile.py" if conanfile_path else ".."
client.run('package {0} --build-folder=.'.format(path))
self.assertEqual(sorted(os.listdir(package_folder)),
sorted(["include", "lib", "conaninfo.txt", "conanmanifest.txt"]))
else:
package_folder = temp_folder()
client.current_folder = package_folder
build_folder = os.path.join(path, "build")
if conanfile_path:
path = os.path.join(path, "conanfile.py")
client.run('package "{0}" --build-folder="{2}"'
' --package-folder="{1}"'.format(path, package_folder, build_folder))
self.assertEqual(sorted(os.listdir(package_folder)),
sorted(["include", "lib", "conaninfo.txt",
"conanmanifest.txt"]))
content = load(os.path.join(package_folder, "include/file.h"))
self.assertEqual(content, "foo")
self.assertEqual(os.listdir(os.path.join(package_folder, "include")), ["file.h"])
self.assertEqual(os.listdir(os.path.join(package_folder, "lib")), ["mypkg.lib"])
@parameterized.expand([(False, False), (True, False), (True, True), (False, True)])
def local_package_source_test(self, default_folder, conanfile_path):
client = TestClient()
conanfile_template = """
from conans import ConanFile
class MyConan(ConanFile):
def package(self):
self.copy(pattern="*.h", dst="include", src="include")
self.copy(pattern="*.lib")
self.copy(pattern="myapp", src="bin", dst="bin")
"""
client.save({"src/include/file.h": "foo",
"build/lib/mypkg.lib": "mylib",
"build/bin/myapp": "",
CONANFILE: conanfile_template})
conanfile_folder = client.current_folder
path = conanfile_folder
client.current_folder = os.path.join(client.current_folder, "build")
client.run("install ..")
if default_folder:
package_folder = os.path.join(client.current_folder, "package")
path = "../conanfile.py" if conanfile_path else ".."
client.run('package {0} --build-folder=. --source-folder=../src'.format(path))
else:
package_folder = temp_folder()
if conanfile_path:
path = os.path.join(path, "conanfile.py")
client.run('package "{0}" --build-folder="{1}/build" '
'--package-folder="{2}" --source-folder="{1}/src"'.
format(path, conanfile_folder, package_folder))
self.assertNotIn("package(): Copied 1 \'\' file", client.out)
self.assertIn("package(): Copied 1 file: myapp", client.out)
content = load(os.path.join(package_folder, "include/file.h"))
self.assertEqual(content, "foo")
self.assertEqual(sorted(os.listdir(package_folder)),
sorted(["include", "lib", "bin", "conaninfo.txt", "conanmanifest.txt"]))
self.assertEqual(os.listdir(os.path.join(package_folder, "include")), ["file.h"])
self.assertEqual(os.listdir(os.path.join(package_folder, "lib")), ["mypkg.lib"])
self.assertEqual(os.listdir(os.path.join(package_folder, "bin")), ["myapp"])
def no_files_copied_local_package_test(self):
# https://github.com/conan-io/conan/issues/2753
client = TestClient()
conanfile = """
from conans import ConanFile
class MyConan(ConanFile):
def build(self):
pass
def package(self):
self.copy(pattern="*.lib")
"""
client.save({"source/include/file.h": "foo",
"build/bin/library.lib": "",
CONANFILE: conanfile})
client.run("install . --install-folder=install")
client.run('package . --source-folder=source --install-folder=install --build-folder=build')
self.assertIn("No files copied from source folder!", client.out)
self.assertIn("Copied 1 '.lib' file: library.lib", client.out)
conanfile = """
from conans import ConanFile
class MyConan(ConanFile):
def build(self):
pass
def package(self):
self.copy(pattern="*.h")
"""
client.save({CONANFILE: conanfile})
client.run('package . --source-folder=source --install-folder=install --build-folder=build')
self.assertIn("No files copied from build folder!", client.out)
self.assertIn("Copied 1 '.h' file: file.h", client.out)
conanfile = """
from conans import ConanFile
class MyConan(ConanFile):
def build(self):
pass
def package(self):
self.copy(pattern="*.fake")
"""
client.save({CONANFILE: conanfile})
client.run('package . --source-folder=source --install-folder=install --build-folder=build')
self.assertIn("No files copied from source folder!", client.out)
self.assertIn("No files copied from build folder!", client.out)
self.assertNotIn("Copied 1 '.h' file: file.h", client.out)
self.assertNotIn("Copied 1 '.lib' file: library.lib", client.out)
|
mit
|
Yottabits/102shows
|
server/lightshows/rgbtest.py
|
1
|
1053
|
# RGBTest
# (c) 2016-2017 Simon Leiner
# licensed under the GNU Public License, version 2
from helpers.color import blend_whole_strip_to_color
from lightshows.templates.base import *
class RGBTest(Lightshow):
"""\
    Turns on all red, then all green, then all blue LEDs, and then all of
    them together. No parameters are necessary.
"""
def init_parameters(self):
pass
def check_runnable(self):
return True
def run(self):
while True:
# single leds
blend_whole_strip_to_color(self.strip, (255, 0, 0), fadetime_sec=0)
self.sleep(10)
blend_whole_strip_to_color(self.strip, (0, 255, 0), fadetime_sec=0)
self.sleep(10)
blend_whole_strip_to_color(self.strip, (0, 0, 255), fadetime_sec=0)
self.sleep(10)
# all leds together
blend_whole_strip_to_color(self.strip, (255, 255, 255), fadetime_sec=0)
self.sleep(10)
# clear strip
self.strip.clear_strip()
self.sleep(5)
|
gpl-2.0
|
cudadog/django-allauth
|
allauth/socialaccount/providers/edmodo/provider.py
|
39
|
1141
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class EdmodoAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('profile_url')
def get_avatar_url(self):
return self.account.extra_data.get('avatar_url')
class EdmodoProvider(OAuth2Provider):
id = 'edmodo'
name = 'Edmodo'
package = 'allauth.socialaccount.providers.edmodo'
account_class = EdmodoAccount
def get_default_scope(self):
return ['basic']
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(first_name=data.get('first_name'),
last_name=data.get('last_name'),
email=data.get('email', ''))
def extract_extra_data(self, data):
return dict(user_type=data.get('type'),
profile_url=data.get('url'),
                    avatar_url=data.get('avatars', {}).get('large'))
providers.registry.register(EdmodoProvider)
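# Hedged usage note (added commentary, not in the original file): with
# django-allauth, a provider like this is typically enabled by adding
# 'allauth.socialaccount.providers.edmodo' to INSTALLED_APPS and creating a
# SocialApp entry (client id/secret) for it in the Django admin.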
|
mit
|
Stavitsky/neutron
|
neutron/policy.py
|
4
|
20337
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import collections
import itertools
import logging as std_logging
import re
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import policy
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
'extension:provider_network':
['network:provider:network_type',
'network:provider:physical_network',
'network:provider:segmentation_id'],
'extension:router':
['network:router:external'],
'extension:port_binding':
['port:binding:vif_type', 'port:binding:vif_details',
'port:binding:profile', 'port:binding:host_id']
}
DEPRECATED_ACTION_MAP = {
'view': ['get'],
'set': ['create', 'update']
}
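# Illustrative note (added, not in the original module): given the two maps
# above, set_rules() below expands a deprecated rule such as
# 'extension:provider_network:view' into the cross product of the new
# actions and policies, i.e.:
#   get_network:provider:network_type
#   get_network:provider:physical_network
#   get_network:provider:segmentation_id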
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init():
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer()
_ENFORCER.load_rules(True)
def refresh():
"""Reset policy and init a new instance of Enforcer."""
reset()
init()
def get_resource_and_action(action, pluralized=None):
"""Extract resource and action (write, read) from api operation."""
data = action.split(':', 1)[0].split('_', 1)
resource = pluralized or ("%ss" % data[-1])
return (resource, data[0] != 'get')
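# Worked examples (added commentary) of the helper above:
#   get_resource_and_action('create_network')            -> ('networks', True)
#   get_resource_and_action('get_port:fixed_ips')        -> ('ports', False)
#   get_resource_and_action('update_policy', 'policies') -> ('policies', True)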
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
# Ensure backward compatibility with folsom/grizzly convention
# for extension rules
for pol in policies.keys():
if any([pol.startswith(depr_pol) for depr_pol in
DEPRECATED_POLICY_MAP.keys()]):
LOG.warn(_LW("Found deprecated policy rule:%s. Please consider "
"upgrading your policy configuration file"), pol)
pol_name, action = pol.rsplit(':', 1)
try:
new_actions = DEPRECATED_ACTION_MAP[action]
new_policies = DEPRECATED_POLICY_MAP[pol_name]
# bind new actions and policies together
for actual_policy in ['_'.join(item) for item in
itertools.product(new_actions,
new_policies)]:
if actual_policy not in policies:
# New policy, same rule
LOG.info(_LI("Inserting policy:%(new_policy)s in "
"place of deprecated "
"policy:%(old_policy)s"),
{'new_policy': actual_policy,
'old_policy': pol})
policies[actual_policy] = policies[pol]
# Remove old-style policy
del policies[pol]
except KeyError:
LOG.error(_LE("Backward compatibility unavailable for "
"deprecated policy %s. The policy will "
"not be enforced"), pol)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if 'update' in action:
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, collections.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in validate.iteritems()]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = filter(lambda k: k.startswith('type:dict'), validate.keys())
if not key:
LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action, pluralized)
# Attribute-based checks shall not be enforced on GETs
if is_write:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
# NOTE(ihrachys): if import is put in global, circular
# import failure occurs
manager = importutils.import_module('neutron.manager')
f = getattr(manager.NeutronManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
context = importutils.import_module('neutron.context')
try:
data = f(context.get_admin_context(),
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy check error while calling %s!'),
f)
match = self.match % target
if self.kind in creds:
return match == unicode(creds[self.kind])
return False
@policy.register('field')
class FieldCheck(policy.Check):
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.value = conv_func(value)
def __call__(self, target_dict, cred_dict, enforcer):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return False
return target_value == self.value
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target, pluralized)
credentials = context.to_dict()
return match_rule, target, credentials
def log_rule_list(match_rule):
if LOG.isEnabledFor(std_logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
result = _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
# logging applied rules in case of failure
if not result:
log_rule_list(match_rule)
return result
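# Typical call (illustrative, mirroring the docstring above):
#   allowed = check(context, 'create_network',
#                   {'project_id': context.project_id})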
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises neutron.openstack.common.policy.PolicyNotAuthorized:
if verification fails.
"""
rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
try:
result = _ENFORCER.enforce(rule, target, credentials, action=action,
do_raise=True)
except policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy check for '%s'", action)
return result
def check_is_admin(context):
"""Verify context has admin rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
target = credentials
# Backward compatibility: if ADMIN_CTX_POLICY is not
# found, default to validating role:admin
admin_policy = (ADMIN_CTX_POLICY if ADMIN_CTX_POLICY in _ENFORCER.rules
else 'role:admin')
return _ENFORCER.enforce(admin_policy, target, credentials)
def check_is_advsvc(context):
"""Verify context has advsvc rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
target = credentials
# Backward compatibility: if ADVSVC_CTX_POLICY is not
# found, default to validating role:advsvc
    advsvc_policy = (ADVSVC_CTX_POLICY if ADVSVC_CTX_POLICY in _ENFORCER.rules
                     else 'role:advsvc')
return _ENFORCER.enforce(advsvc_policy, target, credentials)
def _extract_roles(rule, roles):
if isinstance(rule, policy.RoleCheck):
roles.append(rule.match.lower())
elif isinstance(rule, policy.RuleCheck):
_extract_roles(_ENFORCER.rules[rule.match], roles)
elif hasattr(rule, 'rules'):
for rule in rule.rules:
_extract_roles(rule, roles)
def get_admin_roles():
"""Return a list of roles which are granted admin rights according
to policy settings.
"""
# NOTE(salvatore-orlando): This function provides a solution for
# populating implicit contexts with the appropriate roles so that
# they correctly pass policy checks, and will become superseded
# once all explicit policy checks are removed from db logic and
# plugin modules. For backward compatibility it returns the literal
# admin if ADMIN_CTX_POLICY is not defined
init()
if not _ENFORCER.rules or ADMIN_CTX_POLICY not in _ENFORCER.rules:
return ['admin']
try:
admin_ctx_rule = _ENFORCER.rules[ADMIN_CTX_POLICY]
except (KeyError, TypeError):
return
roles = []
_extract_roles(admin_ctx_rule, roles)
return roles
|
apache-2.0
|
sebastien-forestier/explaupoppydiva
|
explaupoppydiva/config.py
|
1
|
6989
|
from numpy import array
from explauto.utils.config import make_configuration
from explauto.sensorimotor_model.nearest_neighbor import NearestNeighbor
from explauto.sensorimotor_model.non_parametric import NonParametric
from environment.mathEnv import MathEnvironment, mathEnv_config
from agent.supervisor import Supervisor
#from agent.zpdes_sup import ZPDESSupervisor
class Config(object):
def __init__(self, name = None, iterations = None, debug = False):
################################### EXPERIMENT CONFIG ###################################
self.debug = debug
if not debug:
self.name = name or 'Experiment'
self.init_rest_trial = False
self.bootstrap = 0
self.bootstrap_range_div = 1.
self.iter = iterations or 50
self.log_each = self.iter/10 #must be <= iter
self.eval_at = range(1, self.iter +2, self.iter/10)
self.n_eval = 10
self.eval_modes = ['explo'] # explo, inverse
else:
self.name = name or 'Experiment'
self.init_rest_trial = False
self.bootstrap = 20
self.bootstrap_range_div = 1.
self.iter = 10
self.log_each = 1
self.eval_at = [10]
self.n_eval = 10
self.eval_modes = ['explo_comp'] # explo, inverse
self.gui = False
self.audio = False
################################### AGENT CONFIG ###################################
self.n_motors = 1
self.motor_dims = [0]
self.sensori_dims = [1, 2]
self.used_dims = 3
#self.n_motors = 3
#self.motor_dims = [0, 1, 2]
#self.sensori_dims = [3, 4, 5, 6, 7, 8, 9]
#self.used_dims = 10
self.sms = {
'LWLR': (NonParametric, {'fwd': 'LWLR', 'inv': 'L-BFGS-B', 'sigma':0.05, 'k':10}),
'NSLWLR': (NonParametric, {'fwd': 'NSLWLR', 'inv': 'L-BFGS-B', 'sigma':0.05, 'sigma_t':100, 'k':20}),
'NSNN': (NonParametric, {'fwd': 'NSNN', 'inv': 'L-BFGS-B', 'sigma':0.05, 'sigma_t':100, 'k':20}),
'WNN': (NonParametric, {'fwd': 'WNN', 'inv': 'WNN', 'sigma':0.05, 'k':5}),
'NSWNN': (NonParametric, {'fwd': 'NSWNN', 'inv': 'NSWNN', 'sigma':0.2, 'sigma_t':100, 'k':20}),
'knn1': (NearestNeighbor, {'sigma_ratio': 1. / 30.}),
'knn2': (NearestNeighbor, {'sigma_ratio': 1. / 30.}),
}
self.std_range = [0,1]
self.mids = ['mod1',
'mod2',
#'mod3',
] # module ids
m1 = [0]
s1 = [1]
s2 = [2]
self.m_spaces = dict(m1=m1)
self.s_spaces = dict(s1=s1, s2=s2)
self.operators = ["par", "seq"]
self.modules = dict(mod1 = dict(m = m1,
s = s1,
m_list = [m1],
operator = "par",
babbling_name = 'goal',
sm_name = 'knn1',
im_name = 'TDDensity',
from_log = None,
#children = ['m1'],
motor_babbling_n_iter=10),
#
# mod2 = dict(m = m2,
# s = s1,
# m_list = [m2],
# operator = "par",
# babbling_name = 'goal',
# sm_name = 'knn1',
# im_name = 'miscRandom',
# from_log = None,
# #children = ["mod1"],
# motor_babbling_n_iter=10),
mod2 = dict(m = s1,
s = s2,
m_list = [s1],
operator = "par",
babbling_name = 'goal',
sm_name = 'knn1',
im_name = 'miscRandom',
from_log = None,
#children = ["mod2"],
motor_babbling_n_iter=10),
)
#
# self.learning = dict(training_mode = 'seq',#seq, par, comb
# seq = dict(not_babbling_mode = 'updating',#updating, fixed
# updating = {}),
# par = dict(par_mode = 'exploring',#exploring, random
# exploring = dict(exploring_mode = 'cma',
# cma = {'n_points':20, 'sigma0':40, 'popsize':4},
# random = {'n_points':20})),
# comb = dict())
self.eval_dims = s2
self.eval_explo_dims = s2
self.eval_range = array([[0.],
[1.]])
self.eval_explo_eps = 0.002
self.eval_explo_comp_eps = 0.002
################################### Process CONFIG ###################################
self.m_mins = [0]
self.m_maxs = [1]
self.s_mins = [0, 0]
self.s_maxs = [1, 1]
self.agent = make_configuration(self.m_mins,
self.m_maxs,
self.s_mins,
self.s_maxs)
# self.supervisor_cls = ZPDESSupervisor
# self.supervisor_config = dict(alpha_reward=0.5,
# activate_succ_th=0.5,
# deactivate_succ_th=0.8,
# progress_window_size=20)
self.supervisor_cls = Supervisor
self.supervisor_config = dict()
self.env_config = mathEnv_config
self.env_cls = MathEnvironment
print self.agent
self.tag = self.name + '-_{}'.format(self.iter)
self.log_dir = ''#determined later
if __name__ == '__main__':
print Config()
|
gpl-3.0
|
vaibhav345/lenskit
|
lenskit-cli/src/man/filter-manpage.py
|
12
|
3152
|
#!/usr/bin/env python
# LensKit, an open source recommender systems toolkit.
# Copyright 2010-2014 Regents of the University of Minnesota and contributors
# Work on LensKit has been funded by the National Science Foundation under
# grants IIS 05-34939, 08-08692, 08-12148, and 10-17697.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# A Pandoc filter script to make a manpage standards-conformant but look sane in source.
# It does 3 things:
# - lift first H1 element (if it starts the document) to the title
# - lift all other headers by one level
# - upcase all L2 headers
#
# This script requires the 'pandocfilters' package (pip install pandocfilters)
import json
import sys
import re
from collections import OrderedDict
import pandocfilters
from pandocfilters import walk, stringify, Header, Str, Strong, Span, attributes
MetaString = pandocfilters.elt('MetaString', 1)
def liftTitle(doc):
"Lift the title from the document."
meta = doc[0]
content = doc[1]
heading = None
if content[0]['t'] == 'Header':
if content[0]['c'][0] == 1:
heading = content[0]
if heading is None:
        print >> sys.stderr, 'error: first block is not a heading'
sys.exit(1)
title = stringify(heading['c'][2])
meta['unMeta']['title'] = MetaString(title)
return [meta, content[1:]]
def upcase(key, value, fmt, meta):
if key == 'Str':
return Str(value.upper())
def liftHeaders(key, value, fmt, meta):
if key == 'Header':
level, attrs, content = value
level -= 1
if level == 1:
content = walk(content, upcase, fmt, meta)
return Header(level, attrs, content)
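# For example (added note): a level-2 "Options" heading in the Markdown
# source is lifted to a level-1 "OPTIONS" heading in the generated man page.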
_man_link_re = re.compile(r'^man:(.*)\((\d)\)')
def interpretManLinks(key, value, fmt, meta):
if key == 'Link':
text, link = value
url, title = link
match = _man_link_re.match(url)
if match is not None:
str = stringify(text)
if str.startswith("lenskit"):
return text
else:
rv = Span(attributes(None),
text + [Str(" ("), Strong([Str(match.group(1))]), Str("(%s))" % (match.group(2),))])
return rv
else:
return None
doc = json.load(sys.stdin, object_pairs_hook=OrderedDict)
doc = liftTitle(doc)
doc = walk(doc, liftHeaders, 'man', doc[0]['unMeta'])
doc = walk(doc, interpretManLinks, 'man', doc[0]['unMeta'])
json.dump(doc, sys.stdout)
|
lgpl-2.1
|
tonycpsu/urwid
|
urwid/tests/test_escapes.py
|
4
|
2944
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Tests covering escape sequences processing """
import unittest
import urwid.escape
class InputEscapeSequenceParserTest(unittest.TestCase):
""" Tests for parser of input escape sequences """
def test_bare_escape(self):
codes = [27]
expected = ['esc']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_meta(self):
codes = [27, ord('4'), ord('2')]
expected = ['meta 4']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([ord('2')], rest)
def test_shift_arrows(self):
codes = [27, ord('['), ord('a')]
expected = ['shift up']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_ctrl_pgup(self):
codes = [27, 91, 53, 59, 53, 126]
expected = ['ctrl page up']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_esc_meta_1(self):
codes = [27, 27, 49]
expected = ['esc', 'meta 1']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_midsequence(self):
# '[11~' is F1, '[12~' is F2, etc
codes = [27, ord('['), ord('1')]
with self.assertRaises(urwid.escape.MoreInputRequired):
urwid.escape.process_keyqueue(codes, more_available=True)
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(['meta ['], actual)
self.assertListEqual([ord('1')], rest)
def test_mouse_press(self):
codes = [27, 91, 77, 32, 41, 48]
expected = [('mouse press', 1.0, 8, 15)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_bug_104(self):
""" GH #104: click-Esc & Esc-click crashes urwid apps """
codes = [27, 27, 91, 77, 32, 127, 59]
expected = ['esc', ('mouse press', 1.0, 94, 26)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
codes = [27, 27, 91, 77, 35, 120, 59]
expected = ['esc', ('mouse release', 0, 87, 26)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
|
lgpl-2.1
|
fforres/presentaciones-y-charlas
|
ProgramadoresIV/una hiJStoria de amor y odio/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py
|
2354
|
10366
|
# Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
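    # Illustrative sketch (added comment): right after od['a'] = 1, the
    # sentinel and the single link point at each other circularly:
    #   root   = [link_a, link_a, None]   # [PREV, NEXT, KEY]
    #   link_a = [root, root, 'a']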
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
mit
|
Ifetayo/alt1
|
qa/rpc-tests/nodehandling.py
|
56
|
3626
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class NodeHandlingTest (BitcoinTestFramework):
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3)  # wait till the nodes are disconnected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/255.255.255.255", listBeforeShutdown[2]['address'])  # must be here
        time.sleep(2)  # make 100% sure the 192.168.0.1 ban has expired
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
assert_equal("127.0.0.0/255.255.255.0", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/255.255.255.255", listAfterShutdown[1]['address'])
assert_equal("2001:4000::/ffff:e000:0:0:0:0:0:0", listAfterShutdown[2]['address'])
###########################
# RPC disconnectnode test #
###########################
url = urlparse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
NodeHandlingTest ().main ()
|
mit
|
rsalmaso/django-cms
|
cms/management/commands/subcommands/base.py
|
1
|
3997
|
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandParser
from django.core.management.color import no_style, color_style
from cms.utils.compat import DJANGO_3_0, DJANGO_3_1, DJANGO_3_2
def add_builtin_arguments(parser):
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django CMS to NOT prompt the user for input of any kind.'
)
# These are taking "as-is" from Django's management base
# management command.
    parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
parser.add_argument('--force-color', action='store_true', dest='force_color', default=False,
help="Colorize the command output.")
if DJANGO_3_0 or DJANGO_3_1 or DJANGO_3_2:
parser.add_argument('--skip-checks', action='store_true', dest='skip_checks', default=False,
help="Skip the checks.")
class SubcommandsCommand(BaseCommand):
subcommands = OrderedDict()
instances = {}
help_string = ''
command_name = ''
stealth_options = ('interactive',)
subcommand_dest = 'subcmd'
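    # Hypothetical subclass sketch (added; class names are assumptions): a
    # concrete command would populate `subcommands` with its own command
    # classes, e.g.
    #
    #   class CheckCommand(SubcommandsCommand):
    #       command_name = 'check'
    #       help_string = 'Check the CMS installation.'
    #
    #   class CmsCommand(SubcommandsCommand):
    #       command_name = 'cms'
    #       subcommands = OrderedDict([('check', CheckCommand)])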
def create_parser(self, prog_name, subcommand):
kwargs = {}
parser = CommandParser(
prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
**kwargs
)
add_builtin_arguments(parser)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
self.instances = {}
if self.subcommands:
stealth_options = set(self.stealth_options)
subparsers = parser.add_subparsers(dest=self.subcommand_dest)
for command, cls in self.subcommands.items():
instance = cls(self.stdout._out, self.stderr._out)
instance.style = self.style
kwargs = {}
parser_sub = subparsers.add_parser(
name=instance.command_name, help=instance.help_string,
description=instance.help_string, **kwargs
)
add_builtin_arguments(parser=parser_sub)
instance.add_arguments(parser_sub)
stealth_options.update({action.dest for action in parser_sub._actions})
self.instances[command] = instance
self.stealth_options = tuple(stealth_options)
def handle(self, *args, **options):
if options[self.subcommand_dest] in self.instances:
command = self.instances[options[self.subcommand_dest]]
if options.get('no_color'):
command.style = no_style()
command.stderr.style_func = None
if options.get('force_color'):
command.style = color_style(force_color=True)
if options.get('stdout'):
command.stdout._out = options.get('stdout')
if options.get('stderr'):
command.stderr._out = options.get('stderr')
command.handle(*args, **options)
else:
self.print_help('manage.py', 'cms')
|
bsd-3-clause
|
giliam/sharbrary
|
library/migrations/0024_auto_20150831_1533.py
|
1
|
2955
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0023_auto_20150820_1615'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'ordering': ['lastname', 'firstname'], 'default_permissions': [], 'verbose_name': 'author', 'verbose_name_plural': 'authors', 'permissions': (('author_new', 'Add an author'), ('author_detail', 'Show an author details'), ('author_edit', 'Edit an author'), ('author_moderate', 'Moderate an author'), ('author_delete', 'Delete an author'), ('author_list', 'Show the list of authors'))},
),
migrations.AlterModelOptions(
name='book',
options={'ordering': ['title'], 'default_permissions': [], 'verbose_name': 'book', 'verbose_name_plural': 'books', 'permissions': (('book_new', 'Add a book'), ('book_detail', 'Show a book page'), ('book_edit', 'Edit a book'), ('book_moderate', 'Moderate a book'), ('book_delete', 'Delete a book'), ('book_remove_from_library', 'Remove a book from your library'), ('book_remove_from_all_libraries', 'Remove a book from all libraries'), ('book_list', 'Show the list of books'))},
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['name'], 'default_permissions': [], 'verbose_name': 'editor', 'verbose_name_plural': 'editors', 'permissions': (('editor_new', 'Add an editor'), ('editor_edit', 'Edit an editor'), ('editor_moderate', 'Moderate an editor'), ('editor_delete', 'Delete an editor'), ('editor_list', 'Show the list of editors'))},
),
migrations.AlterModelOptions(
name='ownership',
options={'ordering': ['book__title', 'owner__username'], 'default_permissions': [], 'verbose_name': 'ownership', 'verbose_name_plural': 'ownerships', 'permissions': (('ownership_new', 'Have a book'), ('ownership_edit', 'Edit a ownership'), ('ownership_moderate', 'Moderate a ownership'), ('ownership_delete', 'Delete a ownership'))},
),
migrations.AlterModelOptions(
name='period',
options={'ordering': ['name'], 'default_permissions': [], 'verbose_name': 'period', 'verbose_name_plural': 'periods', 'permissions': (('period_new', 'Add a period'), ('period_edit', 'Edit a period'), ('period_moderate', 'Moderate a period'), ('period_delete', 'Delete a period'), ('period_list', 'Show the list of periods'))},
),
migrations.AlterModelOptions(
name='theme',
options={'ordering': ['name'], 'default_permissions': [], 'verbose_name': 'theme', 'verbose_name_plural': 'themes', 'permissions': (('theme_new', 'Add a theme'), ('theme_edit', 'Edit a theme'), ('theme_moderate', 'Moderate a theme'), ('theme_delete', 'Delete a theme'), ('theme_list', 'Show the list of themes'))},
),
]
|
gpl-2.0
|
PYPIT/PYPIT
|
pypeit/deprecated/bpmimage.py
|
1
|
2693
|
# Module for generating the BPM image
from __future__ import absolute_import, division, print_function
import numpy as np
import os
from pypeit import msgs
from pypeit.core import procimg
from pypeit.core import parse
from pypeit.spectrographs.util import load_spectrograph
from pypeit import debugger
class BPMImage(object):
"""
    This class is primarily designed to generate a Bad Pixel Image
The master() method will return the image
Should provide both shape and filename to ensure that the
spectrograph can construct the bpm, if reduce_badpix is False.
There are several ways to build a BPM:
1. keck_lris_red
Provide binning and detector number to generate this instrument specific BPM
2. keck_deimos
Provide detector number to generate this instrument specific BPM
3. From a bias image (not well tested)
Set reduce_badpix='bias'
Provide msbias image
4. Dummy image from shape
Args:
spectrograph (str or :class:pypeit.spectrographs.spectrograph.Spectrograph):
Used to specify properties of the detector (for processing)
Attempt to set with settings['run']['spectrograph'] if not input
shape (tuple):
Image shape; used to construct a dummy BPM if all else fails
spec, spat This is the TRIMMED size of the raw images.
det (int, optional):
Detector index
Required for LRISr and DEIMOS
Attributes:
bpm_img (np.ndarray): BPM image
"""
# Frametype is a class attribute
frametype = 'bpm'
# Keep order same as processimages (or else!)
def __init__(self, spectrograph, shape, det=None):
# This function interprets both strings and spectrograph
# instances now
self.spectrograph = load_spectrograph(spectrograph)
# Used to construct the BPM using the spectrograph class
self.shape = shape
#self.filename = filename
self.det = det
# Used to construct the BPM from the bias
#self.msbias = msbias JFH This is not yet supported anyway
#self.trim = trim -- Killing this option for now
# spectrograph or msbias must be defined
if self.spectrograph is None:
msgs.error('BPMImage instantiation incomplete. Must provide spectrograph.')
# Output
self.bpm_img = None
def build(self, filename=None):
"""
Generate the BPM Image
Simmpl
Args:
datasec:
filename:
Returns:
"""
self.bpm_img = self.spectrograph.bpm(shape=self.shape, det=self.det, filename=filename)
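# Hypothetical usage sketch (added; the spectrograph name, shape and the
# raw_file variable are assumptions, not from the original module):
#   bpm = BPMImage('keck_lris_red', shape=(2048, 1024), det=1)
#   bpm.build(filename=raw_file)
#   mask = bpm.bpm_img  # nonzero where pixels are flagged as bad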
|
gpl-3.0
|
luo2chun1lei2/AgileEditor
|
vx/src/ViewHistoryTextCmd.py
|
1
|
6486
|
#-*- coding:utf-8 -*-
'''
Displays the list of executed commands.
'''
from gi.repository import Gtk, Gdk, GObject, GLib, Pango
from VxEventPipe import *
from VxSourceCmd import VxSourceCmdMng
from ViewMenu import ViewMenu
class CmdGObject(GObject.GObject):
__gtype_name__ = 'CmdGObject'
def __init__(self, cmd):
GObject.GObject.__init__(self)
self.cmd = cmd
# This object needs to be registered with GObject.
GObject.type_register(CmdGObject)
class ViewHistoryTextCmd:
'''
Management widget for displaying and editing command groups.
'''
(COLUMN_SOURCE,
COLUMN_COMMAND,
COLUMN_CMD_OBJ,
NUM_COLUMNS) = range(4)
def __init__(self, on_process_func):
self.process_func = on_process_func
        # Build the model
self.model = self.create_model()
        # Create the TreeView widget
treeview = Gtk.TreeView(model=self.model)
treeview.set_rules_hint(True)
self.add_columns(treeview)
treeview.connect("row-activated", self.on_row_activated, self.model)
self.treeview = treeview
# VxEventPipe.register_event(VxEventPipe.EVENT_CMD_START, self.sm_on_cmd_start)
# VxEventPipe.register_event(VxEventPipe.EVENT_CMD_PROCESS, self.sm_on_cmd_process)
# VxEventPipe.register_event(VxEventPipe.EVENT_CMD_FINISH, self.sm_on_cmd_finish)
# VxEventPipe.register_event(VxEventPipe.EVENT_CMD_CANCEL, self.sm_on_cmd_cancel)
        # Scroll bar.
self.scrolledwindow = Gtk.ScrolledWindow()
self.scrolledwindow.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
self.scrolledwindow.set_size_request(150, 0)
self.scrolledwindow.add(self.treeview)
def create_model(self):
model = Gtk.ListStore( str, str, GObject.TYPE_OBJECT)
col = 0
vx_source_cmd_mng = VxSourceCmdMng.instance()
for source_cmd in vx_source_cmd_mng.list:
model.append([source_cmd.source[0:100],
source_cmd.command,
CmdGObject(source_cmd)])
col += 1
return model
def refresh_model(self):
model = self.treeview.get_model()
vx_source_cmd_mng = VxSourceCmdMng.instance()
model.clear()
for source_cmd in vx_source_cmd_mng.list:
model.append([source_cmd.source[0:100],
source_cmd.command,
CmdGObject(source_cmd)])
def create_selected_cmd_list(self):
        ''' Build the list of selectable commands. '''
self.cmds = Gtk.ListStore(str)
for cmd in VcCmdTemplateMng.instance().list:
self.cmds.append([cmd.get_content()])
return self.cmds
def add_columns(self, treeview):
        # The content being processed
renderer = Gtk.CellRendererText()
renderer.set_property("ellipsize-set", True)
renderer.set_property("ellipsize", Pango.EllipsizeMode.END)
renderer.set_fixed_height_from_font(3)
self.renderer_source = renderer
column = Gtk.TreeViewColumn("文本", renderer, text=self.COLUMN_SOURCE)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_alignment(0)
column.set_expand(True)
column.set_resizable(True)
treeview.append_column(column)
        # The command
renderer = Gtk.CellRendererText()
self.renderer_command = renderer
column = Gtk.TreeViewColumn("命令", renderer, text=self.COLUMN_COMMAND)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_alignment(0)
column.set_expand(True)
column.set_resizable(True)
treeview.append_column(column)
def on_row_activated(self, treeview, path_str, column, model):
iter_ = model.get_iter(path_str)
vc_source_cmd = model.get_value(iter_, self.COLUMN_CMD_OBJ).cmd
self.process_func(self, ViewMenu.ACTION_BACK_TO, vc_source_cmd)
def sm_on_cmd_start(self, vc_cmd):
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self._on_cmd_start, vc_cmd)
def _on_cmd_start(self, vc_cmd):
index = self.vc_cmd_grp.commands.index(vc_cmd)
if index < 0:
return
vc_cmd.process = 0
model = self.treeview.get_model()
iter_ = model.get_iter(Gtk.TreePath.new_from_string( str(index) ))
model.set_value(iter_, self.COLUMN_PROGRESS, vc_cmd.process)
model.set_value(iter_, self.COLUMN_CMD_START_PROCESS, True)
        # Clear the log output
VxEventPipe.send_event(VxEventPipe.EVENT_LOG_CLEAN)
def sm_on_cmd_process(self, vc_cmd, process):
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self._on_cmd_process, vc_cmd, process)
def _on_cmd_process(self, vc_cmd, process):
index = self.vc_cmd_grp.commands.index(vc_cmd)
if index < 0:
return
vc_cmd.process = process
model = self.treeview.get_model()
iter_ = model.get_iter(Gtk.TreePath.new_from_string( str(index) ))
model.set_value(iter_, self.COLUMN_PROGRESS, vc_cmd.process)
def sm_on_cmd_finish(self, vc_cmd, is_ok, result):
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self._on_cmd_finish, vc_cmd, is_ok, result)
def _on_cmd_finish(self, vc_cmd, is_ok, result):
index = self.vc_cmd_grp.commands.index(vc_cmd)
if index < 0:
return
vc_cmd.process = 0
if is_ok:
icon_name = Gtk.STOCK_OK
else:
icon_name = Gtk.STOCK_NO
model = self.treeview.get_model()
iter_ = model.get_iter(Gtk.TreePath.new_from_string( str(index) ))
model.set_value(iter_, self.COLUMN_PROGRESS, vc_cmd.process)
model.set_value(iter_, self.COLUMN_CMD_START_PROCESS, False)
model.set_value(iter_, self.COLUMN_IS_OK, icon_name)
if index == len(self.vc_cmd_grp.commands) - 1:
            # This is the last one
self.unfreeze()
def sm_on_cmd_cancel(self):
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self._on_cmd_cancel)
def _on_cmd_cancel(self):
self.unfreeze()
|
gpl-2.0
|
caperren/Archives
|
OSU Robotics Club/Mars Rover 2013-2014/Control Board Firmware/PrototypeBoardFirmware/PythonTest/JoystickTest.py
|
1
|
4956
|
#######################################
# Code coded by Mike Doty
#
# If you want trackball checking, you will
# have to code it yourself. Sorry!
#
# Oh, and it just grabs the first joystick.
# Yes, that makes me lazy.
#
# Released February 8, 2008.
#######################################
import pygame
from pygame.locals import *
class App:
def __init__(self):
pygame.init()
pygame.display.set_caption("Joystick Analyzer")
# Set up the joystick
pygame.joystick.init()
self.my_joystick = None
self.joystick_names = []
# Enumerate joysticks
for i in range(0, pygame.joystick.get_count()):
self.joystick_names.append(pygame.joystick.Joystick(i).get_name())
print(self.joystick_names)
# By default, load the first available joystick.
if (len(self.joystick_names) > 0):
self.my_joystick = pygame.joystick.Joystick(0)
self.my_joystick.init()
max_joy = max(self.my_joystick.get_numaxes(),
self.my_joystick.get_numbuttons(),
self.my_joystick.get_numhats())
self.screen = pygame.display.set_mode( (max_joy * 30 + 10, 170) )
self.font = pygame.font.SysFont("Courier", 20)
# A couple of joystick functions...
def check_axis(self, p_axis):
if (self.my_joystick):
if (p_axis < self.my_joystick.get_numaxes()):
return self.my_joystick.get_axis(p_axis)
return 0
def check_button(self, p_button):
if (self.my_joystick):
if (p_button < self.my_joystick.get_numbuttons()):
return self.my_joystick.get_button(p_button)
return False
def check_hat(self, p_hat):
if (self.my_joystick):
if (p_hat < self.my_joystick.get_numhats()):
return self.my_joystick.get_hat(p_hat)
return (0, 0)
def draw_text(self, text, x, y, color, align_right=False):
surface = self.font.render(text, True, color, (0, 0, 0))
surface.set_colorkey( (0, 0, 0) )
self.screen.blit(surface, (x, y))
def center_text(self, text, x, y, color):
surface = self.font.render(text, True, color, (0, 0, 0))
surface.set_colorkey( (0, 0, 0) )
self.screen.blit(surface, (x - surface.get_width() / 2,
y - surface.get_height() / 2))
def main(self):
while (True):
self.g_keys = pygame.event.get()
self.screen.fill(0)
for event in self.g_keys:
if (event.type == KEYDOWN and event.key == K_ESCAPE):
self.quit()
return
elif (event.type == QUIT):
self.quit()
return
self.draw_text("Joystick Name: %s" % self.joystick_names[0],
5, 5, (0, 255, 0))
self.draw_text("Axes (%d)" % self.my_joystick.get_numaxes(),
5, 25, (255, 255, 255))
for i in range(0, self.my_joystick.get_numaxes()):
if (self.my_joystick.get_axis(i)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 50), 10, 0)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 50), 10, 0)
self.center_text("%d" % i, 20 + (i * 30), 50, (255, 255, 255))
self.draw_text("Buttons (%d)" % self.my_joystick.get_numbuttons(),
5, 75, (255, 255, 255))
for i in range(0, self.my_joystick.get_numbuttons()):
if (self.my_joystick.get_button(i)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 100), 10, 0)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 100), 10, 0)
self.center_text("%d" % i, 20 + (i * 30), 100, (255, 255, 255))
self.draw_text("POV Hats (%d)" % self.my_joystick.get_numhats(),
5, 125, (255, 255, 255))
for i in range(0, self.my_joystick.get_numhats()):
if (self.my_joystick.get_hat(i) != (0, 0)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 150), 10, 0)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 150), 10, 0)
self.center_text("%d" % i, 20 + (i * 30), 100, (255, 255, 255))
pygame.display.flip()
def quit(self):
pygame.display.quit()
app = App()
app.main()
|
gpl-3.0
|
sconecontainers/sconecontainers.github.io
|
node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py
|
2698
|
3270
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test&gt;\'"&#xD;&amp;&#xA;foo'
converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
mit
|
aospx-kitkat/platform_external_chromium_org
|
tools/telemetry/telemetry/core/chrome/desktop_browser_finder_unittest.py
|
29
|
6927
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.chrome import desktop_browser_finder
from telemetry.unittest import system_stub
# This file verifies the logic for finding a browser instance on all platforms
# at once. It does so by providing stubs for the OS/sys/subprocess primitives
# that the underlying finding logic usually uses to locate a suitable browser.
# We prefer this approach to having to run the same test on every platform on
# which we want this code to work.
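# Illustrative flow (an assumption about system_stub's internals): when a test
# appends 'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe' to
# self._files, the stubbed os.path reports that file as present, so
# FindAllAvailableBrowsers discovers a 'system' browser without touching the
# real filesystem.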
class FindTestBase(unittest.TestCase):
def setUp(self):
self._options = browser_options.BrowserOptions()
self._options.chrome_root = '../../../'
self._stubs = system_stub.Override(desktop_browser_finder,
['os', 'subprocess', 'sys'])
def tearDown(self):
self._stubs.Restore()
@property
def _files(self):
return self._stubs.os.path.files
def DoFindAll(self):
return desktop_browser_finder.FindAllAvailableBrowsers(self._options)
def DoFindAllTypes(self):
browsers = self.DoFindAll()
return [b.browser_type for b in browsers]
def has_type(array, browser_type):
return len([x for x in array if x.browser_type == browser_type]) != 0
class FindSystemTest(FindTestBase):
def setUp(self):
super(FindSystemTest, self).setUp()
self._stubs.sys.platform = 'win32'
def testFindProgramFiles(self):
self._files.append(
'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe')
self._stubs.os.program_files = 'C:\\Program Files'
self.assertTrue('system' in self.DoFindAllTypes())
def testFindProgramFilesX86(self):
self._files.append(
'C:\\Program Files(x86)\\Google\\Chrome\\Application\\chrome.exe')
self._stubs.os.program_files_x86 = 'C:\\Program Files(x86)'
self.assertTrue('system' in self.DoFindAllTypes())
def testFindLocalAppData(self):
self._files.append(
'C:\\Local App Data\\Google\\Chrome\\Application\\chrome.exe')
self._stubs.os.local_app_data = 'C:\\Local App Data'
self.assertTrue('system' in self.DoFindAllTypes())
class FindLocalBuildsTest(FindTestBase):
def setUp(self):
super(FindLocalBuildsTest, self).setUp()
self._stubs.sys.platform = 'win32'
def testFindBuild(self):
self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
self.assertTrue('release' in self.DoFindAllTypes())
def testFindOut(self):
self._files.append('..\\..\\..\\out\\Release\\chrome.exe')
self.assertTrue('release' in self.DoFindAllTypes())
def testFindSconsbuild(self):
self._files.append('..\\..\\..\\sconsbuild\\Release\\chrome.exe')
self.assertTrue('release' in self.DoFindAllTypes())
def testFindXcodebuild(self):
self._files.append('..\\..\\..\\xcodebuild\\Release\\chrome.exe')
self.assertTrue('release' in self.DoFindAllTypes())
class OSXFindTest(FindTestBase):
def setUp(self):
super(OSXFindTest, self).setUp()
self._stubs.sys.platform = 'darwin'
self._files.append('/Applications/Google Chrome Canary.app/'
'Contents/MacOS/Google Chrome Canary')
self._files.append('/Applications/Google Chrome.app/' +
'Contents/MacOS/Google Chrome')
self._files.append(
'../../../out/Release/Chromium.app/Contents/MacOS/Chromium')
self._files.append(
'../../../out/Debug/Chromium.app/Contents/MacOS/Chromium')
self._files.append(
'../../../out/Release/Content Shell.app/Contents/MacOS/Content Shell')
self._files.append(
'../../../out/Debug/Content Shell.app/Contents/MacOS/Content Shell')
def testFindAll(self):
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release',
'canary', 'system']))
class LinuxFindTest(FindTestBase):
def setUp(self):
super(LinuxFindTest, self).setUp()
self._stubs.sys.platform = 'linux2'
self._files.append('/foo/chrome')
self._files.append('../../../out/Release/chrome')
self._files.append('../../../out/Debug/chrome')
self._files.append('../../../out/Release/content_shell')
self._files.append('../../../out/Debug/content_shell')
self.has_google_chrome_on_path = False
this = self
def call_hook(*args, **kwargs): # pylint: disable=W0613
if this.has_google_chrome_on_path:
return 0
raise OSError('Not found')
self._stubs.subprocess.call = call_hook
def testFindAllWithExact(self):
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release']))
def testFindWithProvidedExecutable(self):
self._options.browser_executable = '/foo/chrome'
self.assertTrue('exact' in self.DoFindAllTypes())
def testFindUsingDefaults(self):
self.has_google_chrome_on_path = True
self.assertTrue('release' in self.DoFindAllTypes())
del self._files[1]
self.has_google_chrome_on_path = True
self.assertTrue('system' in self.DoFindAllTypes())
self.has_google_chrome_on_path = False
del self._files[1]
self.assertEquals(['content-shell-debug', 'content-shell-release'],
self.DoFindAllTypes())
def testFindUsingRelease(self):
self.assertTrue('release' in self.DoFindAllTypes())
class WinFindTest(FindTestBase):
def setUp(self):
super(WinFindTest, self).setUp()
self._stubs.sys.platform = 'win32'
self._stubs.os.local_app_data = 'c:\\Users\\Someone\\AppData\\Local'
self._files.append('c:\\tmp\\chrome.exe')
self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
self._files.append('..\\..\\..\\build\\Debug\\chrome.exe')
self._files.append('..\\..\\..\\build\\Release\\content_shell.exe')
self._files.append('..\\..\\..\\build\\Debug\\content_shell.exe')
self._files.append(self._stubs.os.local_app_data + '\\' +
'Google\\Chrome\\Application\\chrome.exe')
self._files.append(self._stubs.os.local_app_data + '\\' +
'Google\\Chrome SxS\\Application\\chrome.exe')
def testFindAllGivenDefaults(self):
types = self.DoFindAllTypes()
self.assertEquals(set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release',
'system', 'canary']))
def testFindAllWithExact(self):
self._options.browser_executable = 'c:\\tmp\\chrome.exe'
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['exact',
'debug', 'release',
'content-shell-debug', 'content-shell-release',
'system', 'canary']))
|
bsd-3-clause
|
more1/ThinkStats2
|
code/chap01soln.py
|
67
|
1859
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import sys
import nsfg
import thinkstats2
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
nrows=None):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file)
df = dct.ReadFixedWidth(dat_file, compression='gzip', nrows=nrows)
CleanFemResp(df)
return df
def CleanFemResp(df):
"""Recodes variables from the respondent frame.
df: DataFrame
"""
pass
def ValidatePregnum(resp):
"""Validate pregnum in the respondent file.
resp: respondent DataFrame
"""
# read the pregnancy frame
preg = nsfg.ReadFemPreg()
# make the map from caseid to list of pregnancy indices
preg_map = nsfg.MakePregMap(preg)
# iterate through the respondent pregnum series
for index, pregnum in resp.pregnum.iteritems():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
return False
return True
def main(script):
"""Tests the functions in this module.
script: string script name
"""
resp = ReadFemResp()
assert(len(resp) == 7643)
assert(resp.pregnum.value_counts()[1] == 1267)
assert(ValidatePregnum(resp))
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
|
gpl-3.0
|
YannThorimbert/ThorPy-1.4.1
|
thorpy/miscgui/style.py
|
3
|
3847
|
"""
This module stores the variables that define the style of the GUI.
Note that the module "painterstyle" also defines the default painters.
Default painters are in a separate module because they need to import painters,
and painters need to import style.
"""
import os
from thorpy import THORPY_PATH
from thorpy.miscgui import constants
# Default Texts
OK_TXT = "Ok"
CANCEL_TXT = "Cancel"
# Default fonts: the program will try to use the fonts in the order of FONTS
FONTS = ("verdana", "comicsansms", "arial", "timesnewroman",
"dejavusansserif", "ubuntu", "century")
FONT_SIZE = 12
FONT_COLOR = constants.BLACK
ITALIC = False
BOLD = False
UNDERLINE = False
FONT_AA = True
FONT_BCKGR = None
# Default fonts for titles (bar) texts
BAR_FONTS = ("verdana", "comicsansms", "arial", "timesnewroman",
"dejavusansserif", "ubuntu", "century")
FONT_BAR_SIZE = 12
FONT_BAR_COLOR = constants.YELLOW
BAR_ITALIC = False
BAR_BOLD = False
BAR_UNDERLINE = False
FONT_BAR_AA = True
FONT_BAR_BCKGR = None
#storage
STORE_MODE = "vertical"
STORE_ALIGN = "center"
MARGINS = (5, 5)
GAPS = (5, 5)
NAME_SPACING = 5 # space between the name and the value
# default element color:
DEF_COLOR = constants.BRAY # base color
DEF_COLOR2 = constants.ULTRABRIGHT # color of elements bckgr
DEF_COLOR3 = constants.BRIGHT
DEF_HELP_COLOR = tuple(list(constants.ULTRABRIGHT) + [220])
DARK_FACTOR = 0.5
LIGHT_FACTOR = 1.2
BORDER_FACT = 0.3
DEF_RADIUS = 10
# default colors
COLOR_TXT_HOVER = constants.BLUE # hover highlight
COLOR_BULK_HOVER = constants.GREEN
COLOR_HOVER_DRAGGER = constants.BRIGHT # hover highlight for lift draggers
COLOR_HOVER_CHECK = constants.BRIGHT # hover highlight for check and radio
BAR_COLOR = constants.LIGHTBLUE # head bar
#title
TITLE_SPACE = 3
TITLE_ALIGN = "left"
CUT_WORD = ".."
TITLE_POS = (0, 0)
TITLE_FONT_SIZE = 15
TITLE_FONT_COLOR = (0, 0, 255)
# default elements size
SIZE = (80, 30)
SMALL_SIZE = (16, 16)
LARGE_SIZE = (150, 30)
XLARGE_SIZE = (250, 30)
Y_SMALL_SIZE = 20
CHECK_SIZE = (14, 14) # check and radio boxes
FILE_WIDTH = 100 # width for filenames
# box
BOX_SIZE = (250, 150)
BOX_RADIUS = 8
#help
HELP_SIZE = (80, 30)
# slider (also affects lift)
SLIDER_MARGINS = (2, 2)
SLIDER_THICK = 8
SLIDERX_DRAG_SIZE = (8, 20)
SLIDERY_DRAG_SIZE = (20, 8)
# lift
LIFT_DRAG_SIZE = (14, 20)
LIFT_BUTTON_SIZE = (16, 16)
LIFT_MARGINS = (1, 1)
BUTTON_MARGINS = (2, 2)
ARROW_COLOR = constants.BLACK
# dropdown lists
DDL_SIZE = (100, 300)
DDL_MARGINS = (1, 1)
# browserlight
BROWSERLIGHT_SIZE = (300, 300)
BROWSERLIGHT_DDL_SIZE = (200, 200)
# only used to determine the size of father
BROWSERLIGHT_STORE_MARGINS = (20, 5)
BROWSERLIGHT_STORE_GAPS = (2, 5)
BROWSERLIGHT_LEFT_SHIFT = 20
# browser
BROWSER_SIZE = (300, 300)
BROWSER_DDL_SIZE = (280, 280)
PATH_FONT_SIZE = 10
# dirviewer
DIRVIEWER_GAP = 5 # gap between lines
DIRVIEWER_X = None # x margin
# inserter
MAX_INSERTER_WIDTH = 100
CURS_FACT = 0.8
CURS_THICK = 1
CURS_COLOR = constants.BLACK
INSERTWRITER_MARGIN = 2
#Shadow
SHADOW_ALTITUDE = 5
# default images
CHECKBOX_IMG = os.path.join(THORPY_PATH, "data/check_box.bmp")
CHECKBOX_IMG_COLORKEY = (255, 255, 255)
#for the moment, same colorkey has to be used for checkbox and radio
RADIO_IMG = os.path.join(THORPY_PATH, "data/check_radio.bmp")
ARROW_IMG = os.path.join(THORPY_PATH, "data/arrow.bmp")
ARROW_IMG_COLORKEY = (255, 255, 255)
ARROW_IMG_COLORSOURCE = (0, 0, 0)
FOLDER_IMG = os.path.join(THORPY_PATH, "data/folder.png")
FOLDER_IMG_COLORKEY = (255, 255, 255)
EXAMPLE_IMG = os.path.join(THORPY_PATH, "data/painting.jpg")
DEFAULT_ICON = os.path.join(THORPY_PATH, "data/thorpy_icon.png")
# default styles
STYLE = "normal"
STYLE_NAME = "text"
STYLE_INSERTER_NAME = "text"
STYLE_CHECKER_NAME = "text"
STYLE_SLIDER_NAME = "text"
STYLE_SLIDER_VALUE = "text"
STYLE_BROWSER_LAUNCHER = "normal"
|
mit
|
angad/libjingle-mac
|
scons-2.2.0/build/lib/SCons/Tool/pdf.py
|
14
|
3067
|
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Builder
import SCons.Tool
import SCons.Util
PDFBuilder = None
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
try:
env['BUILDERS']['PDF']
except KeyError:
global PDFBuilder
if PDFBuilder is None:
PDFBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.PDFLaTeXScanner,
prefix = '$PDFPREFIX',
suffix = '$PDFSUFFIX',
emitter = {},
source_ext_match = None,
single_source=True)
env['BUILDERS']['PDF'] = PDFBuilder
env['PDFPREFIX'] = ''
env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
bld = env['BUILDERS']['PDF']
#bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostcript action in gs.py
bld.add_action('.eps', EpsPdfAction)
env['EPSTOPDF'] = 'epstopdf'
env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
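# Usage sketch (hypothetical SConstruct, assuming epstopdf is installed):
#   env = Environment(tools=['pdftex', 'pdf'])
#   env.PDF('figure.pdf', 'figure.eps')  # routed to EpsPdfAction via the '.eps' suffix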
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bsd-3-clause
|
mapsme/omim
|
tools/python/openlr/quality.py
|
7
|
10203
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import operator
import xml.etree.ElementTree as ET
from collections import namedtuple
from itertools import islice
from math import sin, cos, atan2, radians, sqrt
LatLon = namedtuple('LatLon', 'lat, lon')
def distance(x, y):
"""Implements https://en.wikipedia.org/wiki/Haversine_formula.
>>> int(distance(LatLon(55.747043, 37.655554), LatLon(55.754892, 37.657013)))
875
>>> int(distance(LatLon(60.013918, 29.718361), LatLon(59.951572, 30.205536)))
27910
"""
φ1, φ2 = map(radians, [x.lat, y.lat])
λ1, λ2 = map(radians, [x.lon, y.lon])
Δφ = φ2 - φ1
Δλ = λ2 - λ1
a = sin(Δφ/2)**2 + cos(φ1) * cos(φ2) * sin(Δλ/2)**2
R = 6356863 # Earth radius in meters.
return 2 * R * atan2(sqrt(a), sqrt(1 - a))
def lcs(l1, l2, eq=operator.eq):
"""Finds the longest common subsequence of l1 and l2.
Returns a list of common parts and a list of differences.
>>> lcs([1, 2, 3], [2])
([2], [1, 3])
>>> lcs([1, 2, 3, 3, 4], [2, 3, 4, 5])
([2, 3, 4], [1, 3, 5])
>>> lcs('banana', 'baraban')
(['b', 'a', 'a', 'n'], ['a', 'r', 'b', 'n', 'a'])
>>> lcs('abraban', 'banana')
(['b', 'a', 'a', 'n'], ['a', 'r', 'n', 'b', 'a'])
>>> lcs([1, 2, 3], [4, 5])
([], [4, 5, 1, 2, 3])
>>> lcs([4, 5], [1, 2, 3])
([], [1, 2, 3, 4, 5])
"""
prefs_len = [
[0] * (len(l2) + 1)
for _ in range(len(l1) + 1)
]
for i in range(1, len(l1) + 1):
for j in range(1, len(l2) + 1):
if eq(l1[i - 1], l2[j - 1]):
prefs_len[i][j] = prefs_len[i - 1][j - 1] + 1
else:
prefs_len[i][j] = max(prefs_len[i - 1][j], prefs_len[i][j - 1])
common = []
diff = []
i, j = len(l1), len(l2)
while i and j:
assert i >= 0
assert j >= 0
if eq(l1[i - 1], l2[j - 1]):
common.append(l1[i - 1])
i -= 1
j -= 1
elif prefs_len[i - 1][j] >= prefs_len[i][j - 1]:
i -= 1
diff.append(l1[i])
else:
j -= 1
diff.append(l2[j])
diff.extend(reversed(l1[:i]))
diff.extend(reversed(l2[:j]))
return common[::-1], diff[::-1]
def almost_equal(s1, s2, eps=1e-5):
"""
>>> a = (LatLon(55.77286, 37.8976), LatLon(55.77291, 37.89766))
>>> b = (LatLon(55.77286, 37.89761), LatLon(55.77291, 37.89767))
>>> almost_equal(a, b)
True
>>> a = (LatLon(55.89259, 37.72521), LatLon(55.89269, 37.72535))
>>> b = (LatLon(55.89259, 37.72522), LatLon(55.8927, 37.72536))
>>> almost_equal(a, b)
True
>>> a = (LatLon(55.89259, 37.72519), LatLon(55.89269, 37.72535))
>>> b = (LatLon(55.89259, 37.72522), LatLon(55.8927, 37.72536))
>>> almost_equal(a, b)
False
"""
eps *= 2
return all(
abs(p1.lat - p2.lat) <= eps and abs(p1.lon - p2.lon) <= eps
for p1, p2 in zip(s1, s2)
)
def common_part(l1, l2):
assert l1, 'left hand side argument should not be empty'
if not l2:
return 0.0
common, diff = lcs(l1, l2, eq=almost_equal)
common_len = sum(distance(*x) for x in common)
diff_len = sum(distance(*x) for x in diff)
assert common_len + diff_len
return common_len / (common_len + diff_len)
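# Worked example (hypothetical lengths): if the common edges of the two
# polylines sum to 900 m and the differing edges to 100 m, the score is
# 900 / (900 + 100) = 0.9, i.e. 90% of the combined length is shared.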
class Segment:
class NoGoldenPathError(ValueError):
pass
def __init__(self, segment_id, golden_route, matched_route, ignored):
if not golden_route and not ignored:
raise self.NoGoldenPathError(
"segment {} does not have a corresponding golden route "
"and is not marked as ignored"
.format(segment_id)
)
self.segment_id = segment_id
self.golden_route = golden_route
self.matched_route = matched_route or []
self.ignored = ignored
def __repr__(self):
return 'Segment({})'.format(self.segment_id)
def as_tuple(self):
return self.segment_id, self.matched_route, self.golden_route
def parse_route(route):
if not route:
return None
result = []
for edge in route.findall('RoadEdge'):
start = edge.find('StartJunction')
end = edge.find('EndJunction')
result.append((
LatLon(float(start.find('lat').text), float(start.find('lon').text)),
LatLon(float(end.find('lat').text), float(end.find('lon').text))
))
return result
def ignored_segments_number(tree, limit):
ignored_segments_num = 0
ignored_segments_but_matched = []
segments = islice(tree.findall('.//Segment'), limit)
for s in segments:
ignored = s.find('Ignored')
if ignored is not None and ignored.text == 'true':
ignored_segments_num += 1
route = s.find('Route')
if route is not None:
segment_id = int(s.find('.//ReportSegmentID').text)
ignored_segments_but_matched.append(str(segment_id))
return ignored_segments_num, ignored_segments_but_matched
def print_ignored_segments_result(descr, tree, limit):
assessed_ignored_seg = []
(assessed_ignored_seg_num, assessed_ignored_seg_but_matched) =\
ignored_segments_number(tree, limit)
print()
print(descr)
print('{} matched segments from {} ignored segments.'.
format(len(assessed_ignored_seg_but_matched), assessed_ignored_seg_num))
print('Ignored segments, but matched:')
print('\n'.join(assessed_ignored_seg_but_matched))
def parse_segments(tree, limit):
segments = islice(tree.findall('.//Segment'), limit)
for s in segments:
ignored_tag = s.find('Ignored')
ignored = ignored_tag is not None and ignored_tag.text == 'true'
segment_id = int(s.find('.//ReportSegmentID').text)
matched_route = parse_route(s.find('Route'))
# TODO(mgsergio): This is a temporary hack. All untouched segments
# within the limit are considered accurate, so the golden path should
# equal the matched path.
golden_route = parse_route(s.find('GoldenRoute'))
if not golden_route and not ignored:
continue
yield Segment(segment_id, golden_route, matched_route, ignored)
def calculate(tree):
result = {}
for s in parse_segments(tree, args.limit):
try:
# An ignored segment is estimated as 1 if matched_route
# is empty and as zero otherwise.
if s.ignored:
result[s.segment_id] = 1.0 if len(s.matched_route) == 0 else 0.0
else:
result[s.segment_id] = common_part(s.golden_route, s.matched_route)
except AssertionError:
print('Something is wrong with segment {}'.format(s))
raise
except Segment.NoGoldenPathError:
raise
return result
def merge(src, dst):
# If segment was ignored it does not have a golden route.
# We should mark the corresponding route in dst as ignored too.
golden_routes = {
int(s.find('.//ReportSegmentID').text): s.find('GoldenRoute')
for s in src.findall('Segment')
}
ignored_routes = {
int(s.find('.//ReportSegmentID').text): s.find('Ignored')
for s in src.findall('Segment')
}
for s in dst.findall('Segment'):
assert not s.find('GoldenRoute')
assert not s.find('Ignored')
reportSegmentID = int(s.find('.//ReportSegmentID').text)
golden_route = golden_routes[reportSegmentID]
ignored_route = ignored_routes[reportSegmentID]
if ignored_route is not None and ignored_route.text == 'true':
elem = ET.Element('Ignored')
elem.text = 'true'
s.append(elem)
continue
if golden_route:
s.append(golden_route)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Use this tool to get numerical scores on segments matching'
)
parser.add_argument(
'assessed_path', type=str,
help='An assessed matching file.')
parser.add_argument(
'-l', '--limit', type=int, default=None,
help='Process no more than limit segments'
)
parser.add_argument(
'--merge', type=str, default=None,
help='A path to a file to take matched routes from'
)
args = parser.parse_args()
assessed = ET.parse(args.assessed_path)
assessed_scores = calculate(assessed)
if args.merge:
candidate = ET.parse(args.merge)
merge(assessed, candidate)
candidate_scores = calculate(candidate)
print('{}\t{}\t{}\t{}'.format(
'segment_id', 'A', 'B', 'Diff')
)
for seg_id in assessed_scores:
print('{}\t{}\t{}\t{}'.format(
seg_id,
assessed_scores[seg_id], candidate_scores[seg_id],
assessed_scores[seg_id] - candidate_scores[seg_id]
))
mean1 = np.mean(list(assessed_scores.values()))
std1 = np.std(list(assessed_scores.values()), ddof=1)
mean2 = np.mean(list(candidate_scores.values()))
std2 = np.std(list(candidate_scores.values()), ddof=1)
# TODO(mgsergio): Use statistical methods to reason about quality.
print('Base: mean: {:.4f}, std: {:.4f}'.format(mean1, std1))
print('New: mean: {:.4f}, std: {:.4f}'.format(mean2, std2))
print('{} is better on average: mean1 - mean2: {:.4f}'.format(
'Base' if mean1 - mean2 > 0 else 'New',
mean1 - mean2
))
print_ignored_segments_result('Base', assessed, args.limit)
print_ignored_segments_result('New', candidate, args.limit)
else:
print('{}\t{}'.format(
'segment_id', 'intersection_weight')
)
for x in assessed_scores.items():
print('{}\t{}'.format(*x))
print('Edge number: {:d}, mean: {:.4f}, std: {:.4f}'.format(
len(assessed_scores),
np.mean(list(assessed_scores.values())),
np.std(list(assessed_scores.values()), ddof=1)
))
print_ignored_segments_result('', assessed, args.limit)
|
apache-2.0
|
gazoo74/linux
|
tools/perf/scripts/python/exported-sql-viewer.py
|
22
|
107457
|
#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also a "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
try:
xrange
except NameError:
xrange = range
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
if ip < 0:
ip += 1 << 64
return "%x" % ip
def offstr(offset):
if offset:
return "+0x%x" % offset
return ""
def dsoname(name):
if name == "[kernel.kallsyms]":
return "[kernel]"
return name
def findnth(s, sub, n, offs=0):
pos = s.find(sub)
if pos < 0:
return pos
if n <= 1:
return offs + pos
return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
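# Illustrative behaviour (examples added here, not in the original source):
#   findnth("abcabc", "b", 2) == 4   # index of the second "b"
#   findnth("abcabc", "x", 1) == -1  # substring not found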
# Percent to one decimal place
def PercentToOneDP(n, d):
if not d:
return "0.0"
x = (n * Decimal(100)) / d
return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
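# e.g. PercentToOneDP(1, 3) == "33.3"; a zero denominator yields "0.0".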
# Helper for queries that must not fail
def QueryExec(query, stmt):
ret = query.exec_(stmt)
if not ret:
raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
done = Signal(object)
def __init__(self, task, param=None, parent=None):
super(Thread, self).__init__(parent)
self.task = task
self.param = param
def run(self):
while True:
if self.param is None:
done, result = self.task()
else:
done, result = self.task(self.param)
self.done.emit(result)
if done:
break
# Tree data model
class TreeModel(QAbstractItemModel):
def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
self.glb = glb
self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self.root
def rowCount(self, parent):
result = self.Item(parent).childCount()
if result < 0:
result = 0
self.dataChanged.emit(parent, parent)
return result
def hasChildren(self, parent):
return self.Item(parent).hasChildren()
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def parent(self, child):
child_item = child.internalPointer()
if child_item is self.root:
return QModelIndex()
parent_item = child_item.getParentItem()
return self.createIndex(parent_item.getRow(), 0, parent_item)
def index(self, row, column, parent):
child_item = self.Item(parent).getChildItem(row)
return self.createIndex(row, column, child_item)
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.root.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
def __init__(self, parent=None):
super(TableModel, self).__init__(parent)
self.child_count = 0
self.child_items = []
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self
def rowCount(self, parent):
return self.child_count
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def index(self, row, column, parent):
return self.createIndex(row, column, self.child_items[row])
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Model cache
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()
def LookupCreateModel(model_name, create_fn):
model_cache_lock.acquire()
try:
model = model_cache[model_name]
except:
model = None
if model is None:
model = create_fn()
model_cache[model_name] = model
model_cache_lock.release()
return model
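# Usage sketch (mirrors the window classes further below):
#   model = LookupCreateModel("Call Tree", lambda: CallTreeModel(glb))
# Windows that share a model name reuse one instance; the WeakValueDictionary
# lets a model be garbage-collected once its last window is gone.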
# Find bar
class FindBar():
def __init__(self, parent, finder, is_reg_expr=False):
self.finder = finder
self.context = []
self.last_value = None
self.last_pattern = None
label = QLabel("Find:")
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.textbox = QComboBox()
self.textbox.setEditable(True)
self.textbox.currentIndexChanged.connect(self.ValueChanged)
self.progress = QProgressBar()
self.progress.setRange(0, 0)
self.progress.hide()
if is_reg_expr:
self.pattern = QCheckBox("Regular Expression")
else:
self.pattern = QCheckBox("Pattern")
self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.next_button = QToolButton()
self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
self.next_button.released.connect(lambda: self.NextPrev(1))
self.prev_button = QToolButton()
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
self.prev_button.released.connect(lambda: self.NextPrev(-1))
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(label)
self.hbox.addWidget(self.textbox)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.pattern)
self.hbox.addWidget(self.next_button)
self.hbox.addWidget(self.prev_button)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.hide()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.textbox.setFocus()
def Deactivate(self):
self.bar.hide()
def Busy(self):
self.textbox.setEnabled(False)
self.pattern.hide()
self.next_button.hide()
self.prev_button.hide()
self.progress.show()
def Idle(self):
self.textbox.setEnabled(True)
self.progress.hide()
self.pattern.show()
self.next_button.show()
self.prev_button.show()
def Find(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
self.last_value = value
self.last_pattern = pattern
self.finder.Find(value, direction, pattern, self.context)
def ValueChanged(self):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
index = self.textbox.currentIndex()
data = self.textbox.itemData(index)
# Store the pattern in the combo box to keep it with the text value
if data is None:
self.textbox.setItemData(index, pattern)
else:
self.pattern.setChecked(data)
self.Find(0)
def NextPrev(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
if value != self.last_value:
index = self.textbox.findText(value)
# Allow for a button press before the value has been added to the combo box
if index < 0:
index = self.textbox.count()
self.textbox.addItem(value, pattern)
self.textbox.setCurrentIndex(index)
return
else:
self.textbox.setItemData(index, pattern)
elif pattern != self.last_pattern:
# Keep the pattern recorded in the combo box up to date
index = self.textbox.currentIndex()
self.textbox.setItemData(index, pattern)
self.Find(direction)
def NotFound(self):
QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
def __init__(self, glb, row, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.query_done = False
self.child_count = 0
self.child_items = []
if parent_item:
self.level = parent_item.level + 1
else:
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.call_path_id = call_path_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE parent_call_path_id = " + str(self.call_path_id) +
" AND comm_id = " + str(self.comm_id) +
" AND thread_id = " + str(self.thread_id) +
" GROUP BY call_path_id, name, short_name"
" ORDER BY call_path_id")
while query.next():
child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallGraphLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallGraphRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
# postgresql and sqlite pattern patching differences:
# postgresql LIKE is case sensitive but sqlite LIKE is not
# postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
# postgresql supports ILIKE which is case insensitive
# sqlite supports GLOB (text only) which uses * and ? and is case sensitive
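# Illustrative translation (example pattern, not from the original comments):
# a user pattern "main*" becomes LIKE 'main%' on postgresql and
# GLOB 'main*' on sqlite3.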
if not self.glb.dbref.is_sqlite3:
# Escape % and _
s = value.replace("%", "\%")
s = s.replace("_", "\_")
# Translate * and ? into SQL LIKE pattern characters % and _
trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
return self.FindPath(query)
return []
def FindValue(self, value, pattern, query, last_value, last_pattern):
if last_value == value and pattern == last_pattern:
found = query.first()
else:
self.FindSelect(value, pattern, query)
found = query.next()
return self.Found(query, found)
def FindNext(self, query):
found = query.next()
if not found:
found = query.first()
return self.Found(query, found)
def FindPrev(self, query):
found = query.previous()
if not found:
found = query.last()
return self.Found(query, found)
def FindThread(self, c):
if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
elif c.direction > 0:
ids = self.FindNext(c.query)
else:
ids = self.FindPrev(c.query)
return (True, ids)
def Find(self, value, direction, pattern, context, callback):
class Context():
def __init__(self, *x):
self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
def Update(self, *x):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
if len(context):
context[0].Update(value, direction, pattern)
else:
context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
# Use a thread so the UI is not blocked during the SELECT
thread = Thread(self.FindThread, context[0])
thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, ids):
callback(ids)
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallGraphModel, self).__init__(glb, parent)
def GetRoot(self):
return CallGraphRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" GROUP BY comm_id, thread_id, call_path_id"
" ORDER BY comm_id, thread_id, call_path_id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM call_paths"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
# The call path root is not used
if ids[0] == 1:
del ids[0]
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True
if self.calls_id == 0:
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
else:
comm_thread = ""
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
" ORDER BY call_time, calls.id")
while query.next():
child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallTreeLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallTreeRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call Tree data model
class CallTreeModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallTreeModel, self).__init__(glb, parent)
def GetRoot(self):
return CallTreeRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT calls.id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" ORDER BY comm_id, thread_id, call_time, calls.id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM calls"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Vertical widget layout
class VBox():
def __init__(self, w1, w2, w3=None):
self.vbox = QWidget()
self.vbox.setLayout(QVBoxLayout())
self.vbox.layout().setContentsMargins(0, 0, 0, 0)
self.vbox.layout().addWidget(w1)
self.vbox.layout().addWidget(w2)
if w3:
self.vbox.layout().addWidget(w3)
def Widget(self):
return self.vbox
# Tree window base
class TreeWindowBase(QMdiSubWindow):
def __init__(self, parent=None):
super(TreeWindowBase, self).__init__(parent)
self.model = None
self.find_bar = None
self.view = QTreeView()
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.context_menu = TreeContextMenu(self.view)
def DisplayFound(self, ids):
if not len(ids):
return False
parent = QModelIndex()
for dbid in ids:
found = False
n = self.model.rowCount(parent)
for row in xrange(n):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
self.view.setCurrentIndex(child)
parent = child
break
if not found:
break
return found
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.model.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, ids):
found = True
if not self.DisplayFound(ids):
found = False
self.find_bar.Idle()
if not found:
self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallGraphWindow, self).__init__(parent)
self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
self.view.setModel(self.model)
for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallTreeWindow, self).__init__(parent)
self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
self.view.setModel(self.model)
for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
# Child data item finder
class ChildDataItemFinder():
def __init__(self, root):
self.root = root
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
self.rows = []
self.pos = 0
def FindSelect(self):
self.rows = []
if self.pattern:
pattern = re.compile(self.value)
for child in self.root.child_items:
for column_data in child.data:
if re.search(pattern, str(column_data)) is not None:
self.rows.append(child.row)
break
else:
for child in self.root.child_items:
for column_data in child.data:
if self.value in str(column_data):
self.rows.append(child.row)
break
def FindValue(self):
self.pos = 0
if self.last_value != self.value or self.pattern != self.last_pattern:
self.FindSelect()
if not len(self.rows):
return -1
return self.rows[self.pos]
def FindThread(self):
if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
row = self.FindValue()
elif len(self.rows):
if self.direction > 0:
self.pos += 1
if self.pos >= len(self.rows):
self.pos = 0
else:
self.pos -= 1
if self.pos < 0:
self.pos = len(self.rows) - 1
row = self.rows[self.pos]
else:
row = -1
return (True, row)
def Find(self, value, direction, pattern, context, callback):
		self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
# Use a thread so the UI is not blocked
thread = Thread(self.FindThread)
thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, row):
callback(row)
# Number of database records to fetch in one go
glb_chunk_sz = 10000
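# Illustrative sketch (not used by the viewer) of the keyset pagination
# pattern behind the $$last_id$$ placeholder used below: each batch selects
# only rows with id greater than the largest id already fetched, so fetching
# resumes exactly where it stopped without OFFSET scans. The sqlite3-style
# connection and table name are assumptions for a self-contained example.
def _keyset_batches_example(conn, chunk=glb_chunk_sz):
	last_id = 0
	while True:
		rows = conn.execute("SELECT id FROM samples WHERE id > ? ORDER BY id LIMIT ?",
				    (last_id, chunk)).fetchall()
		if not rows:
			return
		yield rows
		last_id = rows[-1][0]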
# Background process for SQL data fetcher
class SQLFetcherProcess():
def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
# Need a unique connection name
conn_name = "SQLFetcher" + str(os.getpid())
self.db, dbname = dbref.Open(conn_name)
self.sql = sql
self.buffer = buffer
self.head = head
self.tail = tail
self.fetch_count = fetch_count
self.fetching_done = fetching_done
self.process_target = process_target
self.wait_event = wait_event
self.fetched_event = fetched_event
self.prep = prep
self.query = QSqlQuery(self.db)
self.query_limit = 0 if "$$last_id$$" in sql else 2
self.last_id = -1
self.fetched = 0
self.more = True
self.local_head = self.head.value
self.local_tail = self.tail.value
def Select(self):
if self.query_limit:
if self.query_limit == 1:
return
self.query_limit -= 1
stmt = self.sql.replace("$$last_id$$", str(self.last_id))
QueryExec(self.query, stmt)
def Next(self):
if not self.query.next():
self.Select()
if not self.query.next():
return None
self.last_id = self.query.value(0)
return self.prep(self.query)
def WaitForTarget(self):
while True:
self.wait_event.clear()
target = self.process_target.value
if target > self.fetched or target < 0:
break
self.wait_event.wait()
return target
def HasSpace(self, sz):
if self.local_tail <= self.local_head:
space = len(self.buffer) - self.local_head
if space > sz:
return True
if space >= glb_nsz:
				# Store 0 (or leave space < glb_nsz) to tell the reader there is nothing more at the top of the buffer, i.e. it should wrap to the start
nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
return True
return False
def WaitForSpace(self, sz):
if self.HasSpace(sz):
return
while True:
self.wait_event.clear()
self.local_tail = self.tail.value
if self.HasSpace(sz):
return
self.wait_event.wait()
def AddToBuffer(self, obj):
d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
self.buffer[pos : pos + len(nd)] = nd
self.buffer[pos + glb_nsz : pos + sz] = d
self.local_head += sz
def FetchBatch(self, batch_size):
fetched = 0
while batch_size > fetched:
obj = self.Next()
if obj is None:
self.more = False
break
self.AddToBuffer(obj)
fetched += 1
if fetched:
self.fetched += fetched
with self.fetch_count.get_lock():
self.fetch_count.value += fetched
self.head.value = self.local_head
self.fetched_event.set()
def Run(self):
while self.more:
target = self.WaitForTarget()
if target < 0:
break
batch_size = min(glb_chunk_sz, target - self.fetched)
self.FetchBatch(batch_size)
self.fetching_done.value = True
self.fetched_event.set()
def SQLFetcherFn(*x):
process = SQLFetcherProcess(*x)
process.Run()
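# The ring buffer above frames each record as a fixed-size slot holding the
# pickled payload length (glb_nsz bytes, defined earlier), followed by the
# pickled payload itself; a stored length of 0 means "wrap to the start".
# Reading the fixed slot works even when the pickled length is shorter than
# the slot, because pickle data is self-delimiting. A minimal sketch of the
# framing in a plain bytearray, with an illustrative slot size:
def _frame_example():
	import pickle
	nsz = 8  # fixed slot size for the pickled length, like glb_nsz
	buf = bytearray(1024)
	payload = pickle.dumps(("sample", 42), pickle.HIGHEST_PROTOCOL)
	nd = pickle.dumps(len(payload), pickle.HIGHEST_PROTOCOL)
	buf[0:len(nd)] = nd                           # write the length field
	buf[nsz:nsz + len(payload)] = payload         # write the payload
	n = pickle.loads(bytes(buf[0:nsz]))           # read the length back
	return pickle.loads(bytes(buf[nsz:nsz + n]))  # read the payload back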
# SQL data fetcher
class SQLFetcher(QObject):
done = Signal(object)
def __init__(self, glb, sql, prep, process_data, parent=None):
super(SQLFetcher, self).__init__(parent)
self.process_data = process_data
self.more = True
self.target = 0
self.last_target = 0
self.fetched = 0
self.buffer_size = 16 * 1024 * 1024
self.buffer = Array(c_char, self.buffer_size, lock=False)
self.head = Value(c_longlong)
self.tail = Value(c_longlong)
self.local_tail = 0
self.fetch_count = Value(c_longlong)
self.fetching_done = Value(c_bool)
self.last_count = 0
self.process_target = Value(c_longlong)
self.wait_event = Event()
self.fetched_event = Event()
glb.AddInstanceToShutdownOnExit(self)
self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
self.process.start()
self.thread = Thread(self.Thread)
self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
self.thread.start()
def Shutdown(self):
# Tell the thread and process to exit
self.process_target.value = -1
self.wait_event.set()
self.more = False
self.fetching_done.value = True
self.fetched_event.set()
def Thread(self):
if not self.more:
return True, 0
while True:
self.fetched_event.clear()
fetch_count = self.fetch_count.value
if fetch_count != self.last_count:
break
if self.fetching_done.value:
self.more = False
return True, 0
self.fetched_event.wait()
count = fetch_count - self.last_count
self.last_count = fetch_count
self.fetched += count
return False, count
def Fetch(self, nr):
if not self.more:
			# -1 indicates there are no more
return -1
result = self.fetched
extra = result + nr - self.target
if extra > 0:
self.target += extra
# process_target < 0 indicates shutting down
if self.process_target.value >= 0:
self.process_target.value = self.target
self.wait_event.set()
return result
def RemoveFromBuffer(self):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
def ProcessData(self, count):
for i in xrange(count):
obj = self.RemoveFromBuffer()
self.process_data(obj)
self.tail.value = self.local_tail
self.wait_event.set()
self.done.emit(count)
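# SQLFetcher above coordinates three parties: the background process keeps
# writing records until it reaches process_target, the reader thread sleeps
# on fetched_event until fetch_count advances, and Fetch() raises the target
# and sets wait_event to wake the producer. A stripped-down, illustrative
# sketch of that target/event handshake using plain threading primitives
# (this script's own Thread class is a Qt wrapper, hence the local import):
def _handshake_example():
	import threading
	target = [0]
	produced = [0]
	wake_producer = threading.Event()
	produced_event = threading.Event()
	def producer():
		while True:
			wake_producer.wait()
			wake_producer.clear()
			if target[0] < 0:  # a negative target means shut down
				return
			while produced[0] < target[0]:
				produced[0] += 1  # stands in for fetching one record
			produced_event.set()
	t = threading.Thread(target=producer)
	t.start()
	target[0] = 5  # ask for 5 records
	wake_producer.set()
	produced_event.wait()
	target[0] = -1  # tell the producer to exit
	wake_producer.set()
	t.join()
	return produced[0]  # -> 5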
# Fetch more records bar
class FetchMoreRecordsBar():
def __init__(self, model, parent):
self.model = model
self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch_count = QSpinBox()
self.fetch_count.setRange(1, 1000000)
self.fetch_count.setValue(10)
self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch = QPushButton("Go!")
self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch.released.connect(self.FetchMoreRecords)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.hide()
self.done_label = QLabel("All records fetched")
self.done_label.hide()
self.spacer = QLabel("")
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(self.label)
self.hbox.addWidget(self.fetch_count)
self.hbox.addWidget(self.fetch)
self.hbox.addWidget(self.spacer)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.done_label)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
		self.bar.setLayout(self.hbox)
self.bar.show()
self.in_progress = False
self.model.progress.connect(self.Progress)
self.done = False
if not model.HasMoreRecords():
self.Done()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.fetch.setFocus()
def Deactivate(self):
self.bar.hide()
def Enable(self, enable):
self.fetch.setEnabled(enable)
self.fetch_count.setEnabled(enable)
def Busy(self):
self.Enable(False)
self.fetch.hide()
self.spacer.hide()
self.progress.show()
def Idle(self):
self.in_progress = False
self.Enable(True)
self.progress.hide()
self.fetch.show()
self.spacer.show()
def Target(self):
return self.fetch_count.value() * glb_chunk_sz
def Done(self):
self.done = True
self.Idle()
self.label.hide()
self.fetch_count.hide()
self.fetch.hide()
self.spacer.hide()
self.done_label.show()
def Progress(self, count):
if self.in_progress:
if count:
				percent = ((count - self.start) * 100) // self.Target()
if percent >= 100:
self.Idle()
else:
self.progress.setValue(percent)
if not count:
# Count value of zero means no more records
self.Done()
def FetchMoreRecords(self):
if self.done:
return
self.progress.setValue(0)
self.Busy()
self.in_progress = True
self.start = self.model.FetchMoreRecords(self.Target())
# Branch data model level two item
class BranchLevelTwoItem():
def __init__(self, row, text, parent_item):
self.row = row
self.parent_item = parent_item
self.data = [""] * 8
self.data[7] = text
self.level = 2
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
return 0
def hasChildren(self):
return False
def getData(self, column):
return self.data[column]
# Branch data model level one item
class BranchLevelOneItem():
def __init__(self, glb, row, data, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.child_count = 0
self.child_items = []
self.data = data[1:]
self.dbid = data[0]
self.level = 1
self.query_done = False
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def Select(self):
self.query_done = True
if not self.glb.have_disassembler:
return
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
" FROM samples"
" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
" WHERE samples.id = " + str(self.dbid))
if not query.next():
return
cpu = query.value(0)
dso = query.value(1)
sym = query.value(2)
if dso == 0 or sym == 0:
return
off = query.value(3)
short_name = query.value(4)
long_name = query.value(5)
build_id = query.value(6)
sym_start = query.value(7)
ip = query.value(8)
QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
" FROM samples"
" INNER JOIN symbols ON samples.symbol_id = symbols.id"
" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
" ORDER BY samples.id"
" LIMIT 1")
if not query.next():
return
if query.value(0) != dso:
# Cannot disassemble from one dso to another
return
bsym = query.value(1)
boff = query.value(2)
bsym_start = query.value(3)
if bsym == 0:
return
tot = bsym_start + boff + 1 - sym_start - off
if tot <= 0 or tot > 16384:
return
inst = self.glb.disassembler.Instruction()
f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
if not f:
return
mode = 0 if Is64Bit(f) else 1
self.glb.disassembler.SetMode(inst, mode)
buf_sz = tot + 16
buf = create_string_buffer(tot + 16)
f.seek(sym_start + off)
buf.value = f.read(buf_sz)
buf_ptr = addressof(buf)
i = 0
while tot > 0:
cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
if cnt:
byte_str = tohex(ip).rjust(16)
for k in xrange(cnt):
byte_str += " %02x" % ord(buf[i])
i += 1
while k < 15:
byte_str += " "
k += 1
self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
self.child_count += 1
else:
return
buf_ptr += cnt
tot -= cnt
buf_sz -= cnt
ip += cnt
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Branch data model root item
class BranchRootItem():
def __init__(self):
self.child_count = 0
self.child_items = []
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return None
def getRow(self):
return 0
def childCount(self):
return self.child_count
def hasChildren(self):
return self.child_count > 0
def getData(self, column):
return ""
# Branch data preparation
def BranchDataPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
def BranchDataPrepWA(query):
data = []
data.append(query.value(0))
	# Work around PySide v1 failing to handle large integers (e.g. time) in Python 3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
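# Note on the workaround above: with PySide v1 on Python 3 a 64-bit value
# such as time cannot be passed through the model to Qt safely, so it is
# converted to a string in Python first; "{:>19}".format(...) right-justifies
# it so the column still lines up and sorts sensibly as text.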
# Branch data model
class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
" ip, symbols.name, sym_offset, dsos.short_name,"
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
" FROM samples"
" INNER JOIN comms ON comm_id = comms.id"
" INNER JOIN threads ON thread_id = threads.id"
" INNER JOIN branch_types ON branch_type = branch_types.id"
" INNER JOIN symbols ON symbol_id = symbols.id"
" INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
" INNER JOIN dsos ON samples.dso_id = dsos.id"
" INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
" WHERE samples.id > $$last_id$$" + where_clause +
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
if pyside_version_1 and sys.version_info[0] == 3:
prep = BranchDataPrepWA
else:
prep = BranchDataPrep
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def GetRoot(self):
return BranchRootItem()
def columnCount(self, parent=None):
return 8
def columnHeader(self, column):
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
def columnFont(self, column):
if column != 7:
return None
return QFont("Monospace")
def DisplayData(self, item, index):
if item.level == 1:
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
self.root.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.root.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.root.child_count += count
self.endInsertRows()
self.progress.emit(self.root.child_count)
def FetchMoreRecords(self, count):
current = self.root.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
# Report Variables
class ReportVars():
def __init__(self, name = "", where_clause = "", limit = ""):
self.name = name
self.where_clause = where_clause
self.limit = limit
def UniqueId(self):
return str(self.where_clause + ";" + self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.view.setModel(self.model)
self.ResizeColumnsToContents()
self.context_menu = TreeContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model.root)
self.fetch_bar = FetchMoreRecordsBar(self.model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
		# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
mm = "MM" if column else "MMMM"
font = self.view.font()
metrics = QFontMetrics(font)
		width = 0
		for row in xrange(n):
			val = self.model.root.child_items[row].data[column]
			width = max(width, metrics.width(str(val) + mm))
		val = self.model.columnHeader(column)
		width = max(width, metrics.width(str(val) + mm))
		self.view.setColumnWidth(column, width)
def ResizeColumnsToContents(self):
n = min(self.model.root.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Line edit data item
class LineEditDataItem(object):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
self.parent = parent
self.id = id
self.value = default
self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
def TurnTextRed(self):
if not self.red:
palette = QPalette()
			palette.setColor(QPalette.Text, Qt.red)
self.widget.setPalette(palette)
self.red = True
def TurnTextNormal(self):
if self.red:
palette = QPalette()
self.widget.setPalette(palette)
self.red = False
def InvalidValue(self, value):
self.value = ""
self.TurnTextRed()
self.error = self.label + " invalid value '" + value + "'"
self.parent.ShowMessage(self.error)
def Invalidate(self):
self.validated = False
def DoValidate(self, input_string):
self.value = input_string.strip()
def Validate(self):
self.validated = True
self.error = ""
self.TurnTextNormal()
self.parent.ClearMessage()
input_string = self.widget.text()
if not len(input_string.strip()):
self.value = ""
return
self.DoValidate(input_string)
def IsValid(self):
if not self.validated:
self.Validate()
if len(self.error):
self.parent.ShowMessage(self.error)
return False
return True
def IsNumber(self, value):
try:
x = int(value)
except:
x = 0
return str(x) == value
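# Note on IsNumber above: requiring str(int(value)) == value accepts only
# canonical decimal strings, e.g. "7" and "-7" pass but "07", "+7", " 7" and
# "0x7" are all rejected, which keeps values safe to splice into SQL.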
# Non-negative integer ranges dialog data item
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
self.column_name = column_name
def DoValidate(self, input_string):
singles = []
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if "-" in value:
vrange = value.split("-")
if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return self.InvalidValue(value)
ranges.append(vrange)
else:
if not self.IsNumber(value):
return self.InvalidValue(value)
singles.append(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
if len(singles):
ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
self.value = " OR ".join(ranges)
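# For example, entering "0,5-6" for column "cpu" in the dialog produces the
# clause: (cpu >= 5 AND cpu <= 6) OR cpu IN (0)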
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
def DoValidate(self, input_string):
if not self.IsNumber(input_string.strip()):
return self.InvalidValue(input_string)
value = int(input_string.strip())
if value <= 0:
return self.InvalidValue(input_string)
self.value = str(value)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
self.table_name = table_name
self.match_column = match_column
self.column_name1 = column_name1
self.column_name2 = column_name2
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
ret = query.exec_(stmt)
if ret:
while query.next():
ids.append(str(query.value(0)))
return ids
def DoValidate(self, input_string):
all_ids = []
for value in [x.strip() for x in input_string.split(",")]:
ids = self.ValueToIds(value)
if len(ids):
all_ids.extend(ids)
else:
return self.InvalidValue(value)
self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
if self.column_name2:
self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
class SampleTimeRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
self.column_name = column_name
self.last_id = 0
self.first_time = 0
self.last_time = 2 ** 64
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
if query.next():
self.last_id = int(query.value(0))
self.last_time = int(query.value(1))
QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
if query.next():
self.first_time = int(query.value(0))
if placeholder_text:
placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
return True, int(query.value(0))
else:
return False, 0
def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
query = QSqlQuery(self.glb.db)
while True:
			next_id = (lower_id + higher_id) // 2
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
if not query.next():
ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
if not ok:
ok, dbid = self.IdBetween(query, next_id, higher_id, "")
if not ok:
return str(higher_id)
next_id = dbid
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
next_time = int(query.value(0))
if get_floor:
if target_time > next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(higher_id)
else:
if target_time >= next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(lower_id)
def ConvertRelativeTime(self, val):
mult = 1
suffix = val[-2:]
if suffix == "ms":
mult = 1000000
elif suffix == "us":
mult = 1000
elif suffix == "ns":
mult = 1
else:
return val
val = val[:-2].strip()
if not self.IsNumber(val):
return val
val = int(val) * mult
if val >= 0:
val += self.first_time
else:
val += self.last_time
return str(val)
def ConvertTimeRange(self, vrange):
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
return True
def AddTimeRange(self, value, ranges):
n = value.count("-")
if n == 1:
pass
elif n == 2:
if value.split("-")[1].strip() == "":
n = 1
elif n == 3:
n = 2
else:
return False
pos = findnth(value, "-", n)
		vrange = [value[:pos].strip(), value[pos + 1:].strip()]
if self.ConvertTimeRange(vrange):
ranges.append(vrange)
return True
return False
def DoValidate(self, input_string):
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if not self.AddTimeRange(value, ranges):
return self.InvalidValue(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
self.value = " OR ".join(ranges)
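# Notes on the conversions above: with first_time = 1000000, a relative
# "100us" becomes str(1000000 + 100000); negative values such as "-10ms" are
# taken relative to last_time; anything without a recognised suffix is
# returned unchanged. BinarySearchTime then maps the resulting times to
# sample ids with a floor/ceiling binary search (the IdBetween fallback
# handles gaps in the id sequence). The core floor-search idea, sketched
# over a plain sorted list rather than the database:
def _floor_search_example(times, target_time):
	import bisect
	# index of the last element <= target_time; assumes one exists and
	# that times is sorted (illustrative only)
	return bisect.bisect_right(times, target_time) - 1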
# Report Dialog Base
class ReportDialogBase(QDialog):
def __init__(self, glb, title, items, partial, parent=None):
super(ReportDialogBase, self).__init__(parent)
self.glb = glb
self.report_vars = ReportVars()
self.setWindowTitle(title)
self.setMinimumWidth(600)
self.data_items = [x(glb, self) for x in items]
self.partial = partial
self.grid = QGridLayout()
for row in xrange(len(self.data_items)):
self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
self.grid.addWidget(self.data_items[row].widget, row, 1)
self.status = QLabel()
self.ok_button = QPushButton("Ok", self)
self.ok_button.setDefault(True)
self.ok_button.released.connect(self.Ok)
self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.cancel_button = QPushButton("Cancel", self)
self.cancel_button.released.connect(self.reject)
self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.hbox = QHBoxLayout()
#self.hbox.addStretch()
self.hbox.addWidget(self.status)
self.hbox.addWidget(self.ok_button)
self.hbox.addWidget(self.cancel_button)
self.vbox = QVBoxLayout()
self.vbox.addLayout(self.grid)
self.vbox.addLayout(self.hbox)
		self.setLayout(self.vbox)
def Ok(self):
vars = self.report_vars
for d in self.data_items:
if d.id == "REPORTNAME":
vars.name = d.value
if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
if d.id == "LIMIT":
vars.limit = d.value
elif len(d.value):
if len(vars.where_clause):
vars.where_clause += " AND "
vars.where_clause += d.value
if len(vars.where_clause):
if self.partial:
vars.where_clause = " AND ( " + vars.where_clause + " ) "
else:
vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
		self.status.setText("<font color=#FF0000>" + msg + "</font>")
def ClearMessage(self):
self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Selected Branches"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
events = []
query = QSqlQuery(db)
QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
while query.next():
events.append(query.value(0))
return events
# Is a table selectable
def IsSelectable(db, table, sql = ""):
query = QSqlQuery(db)
try:
QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
# SQL table data model item
class SQLTableItem():
def __init__(self, row, data):
self.row = row
self.data = data
def getData(self, column):
return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
progress = Signal(object)
def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
self.column_headers = column_headers
self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def DisplayData(self, item, index):
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = SQLTableItem(self.populated, data)
self.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.child_count += count
self.endInsertRows()
self.progress.emit(self.child_count)
def FetchMoreRecords(self, count):
current = self.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
def columnCount(self, parent=None):
return len(self.column_headers)
def columnHeader(self, column):
return self.column_headers[column]
def SQLTableDataPrep(self, query, count):
data = []
for i in xrange(count):
data.append(query.value(i))
return data
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
def __init__(self, glb, table_name, parent=None):
sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
if table_name[:19] == "information_schema.":
sql = "SELECT * FROM " + table_name
select_table_name = table_name[19:]
schema = "information_schema"
else:
select_table_name = table_name
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
if pyside_version_1 and sys.version_info[0] == 3:
if table_name == "samples_view":
self.SQLTableDataPrep = self.samples_view_DataPrep
if table_name == "samples":
self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
def samples_view_DataPrep(self, query, count):
data = []
data.append(query.value(0))
		# Work around PySide v1 failing to handle large integers (e.g. time) in Python 3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, count):
data.append(query.value(i))
return data
def samples_DataPrep(self, query, count):
data = []
for i in xrange(9):
data.append(query.value(i))
		# Work around PySide v1 failing to handle large integers (e.g. time) in Python 3 by converting to a string
data.append("{:>19}".format(query.value(9)))
for i in xrange(10, count):
data.append(query.value(i))
return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
def __init__(self, parent=None):
super(ResizeColumnsToContentsBase, self).__init__(parent)
def ResizeColumnToContents(self, column, n):
		# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
font = self.view.font()
metrics = QFontMetrics(font)
		width = 0
		for row in xrange(n):
			val = self.data_model.child_items[row].data[column]
			width = max(width, metrics.width(str(val) + "MM"))
		val = self.data_model.columnHeader(column)
		width = max(width, metrics.width(str(val) + "MM"))
		self.view.setColumnWidth(column, width)
def ResizeColumnsToContents(self):
n = min(self.data_model.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.data_model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
# Convert value to CSV
def ToCSValue(val):
if '"' in val:
val = val.replace('"', '""')
if "," in val or '"' in val:
val = '"' + val + '"'
return val
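# e.g. ToCSValue('plain') -> 'plain'; ToCSValue('a,b') -> '"a,b"';
# ToCSValue('say "hi"') -> '"say ""hi"""'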
# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
glb_max_cols = 1000
def RowColumnKey(a):
return a.row() * glb_max_cols + a.column()
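# e.g. an index at row 2, column 5 gets key 2 * 1000 + 5 = 2005, so a sorted
# list of indexes is ordered row-major: all of row 0, then row 1, and so on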
# Copy selected table cells to clipboard
def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
idx_cnt = len(indexes)
if not idx_cnt:
return
if idx_cnt == 1:
		with_hdr = False
min_row = indexes[0].row()
max_row = indexes[0].row()
min_col = indexes[0].column()
max_col = indexes[0].column()
for i in indexes:
min_row = min(min_row, i.row())
max_row = max(max_row, i.row())
min_col = min(min_col, i.column())
max_col = max(max_col, i.column())
	if max_col >= glb_max_cols:
raise RuntimeError("glb_max_cols is too low")
max_width = [0] * (1 + max_col - min_col)
for i in indexes:
c = i.column() - min_col
max_width[c] = max(max_width[c], len(str(i.data())))
text = ""
pad = ""
sep = ""
if with_hdr:
model = indexes[0].model()
for col in range(min_col, max_col + 1):
val = model.headerData(col, Qt.Horizontal)
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
c = col - min_col
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
last_row = min_row
for i in indexes:
if i.row() > last_row:
last_row = i.row()
text += "\n"
pad = ""
sep = ""
if as_csv:
text += sep + ToCSValue(str(i.data()))
sep = ","
else:
width = max_width[i.column() - min_col]
if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = str(i.data()).rjust(width)
else:
val = str(i.data())
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
QApplication.clipboard().setText(text)
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = view.selectedIndexes()
if not len(indexes):
return
selection = view.selectionModel()
first = None
for i in indexes:
above = view.indexAbove(i)
if not selection.isSelected(above):
first = i
break
if first is None:
raise RuntimeError("CopyTreeCellsToClipboard internal error")
model = first.model()
row_cnt = 0
col_cnt = model.columnCount(first)
max_width = [0] * col_cnt
indent_sz = 2
indent_str = " " * indent_sz
expanded_mark_sz = 2
if sys.version_info[0] == 3:
expanded_mark = "\u25BC "
not_expanded_mark = "\u25B6 "
else:
expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
leaf_mark = " "
if not as_csv:
pos = first
while True:
row_cnt += 1
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
if c:
n = len(str(i.data()))
else:
n = len(str(i.data()).strip())
n += (i.internalPointer().level - 1) * indent_sz
n += expanded_mark_sz
max_width[c] = max(max_width[c], n)
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = ""
pad = ""
sep = ""
if with_hdr:
for c in range(col_cnt):
val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
pos = first
while True:
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
val = str(i.data())
if not c:
if model.hasChildren(i):
if view.isExpanded(i):
mark = expanded_mark
else:
mark = not_expanded_mark
else:
mark = leaf_mark
val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
width = max_width[c]
if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = text.rstrip() + "\n"
pad = ""
sep = ""
QApplication.clipboard().setText(text)
def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
view.CopyCellsToClipboard(view, as_csv, with_hdr)
def CopyCellsToClipboardHdr(view):
CopyCellsToClipboard(view, False, True)
def CopyCellsToClipboardCSV(view):
CopyCellsToClipboard(view, True, True)
# Context menu
class ContextMenu(object):
def __init__(self, view):
self.view = view
self.view.setContextMenuPolicy(Qt.CustomContextMenu)
self.view.customContextMenuRequested.connect(self.ShowContextMenu)
def ShowContextMenu(self, pos):
menu = QMenu(self.view)
self.AddActions(menu)
menu.exec_(self.view.mapToGlobal(pos))
def AddCopy(self, menu):
menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
def AddActions(self, menu):
self.AddCopy(menu)
class TreeContextMenu(ContextMenu):
def __init__(self, view):
super(TreeContextMenu, self).__init__(view)
def AddActions(self, menu):
i = self.view.currentIndex()
text = str(i.data()).strip()
if len(text):
menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
self.AddCopy(menu)
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, table_name, parent=None):
super(TableWindow, self).__init__(parent)
self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
self.model = QSortFilterProxyModel()
self.model.setSourceModel(self.data_model)
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.ResizeColumnsToContents()
self.context_menu = ContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.data_model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
# Table list
def GetTableList(glb):
tables = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
else:
QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
while query.next():
tables.append(query.value(0))
if glb.dbref.is_sqlite3:
tables.append("sqlite_master")
else:
tables.append("information_schema.tables")
tables.append("information_schema.views")
tables.append("information_schema.columns")
return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
def __init__(self, glb, report_vars, parent=None):
text = ""
if not glb.dbref.is_sqlite3:
text = "::text"
limit = ""
if len(report_vars.limit):
limit = " LIMIT " + report_vars.limit
sql = ("SELECT comm, pid, tid, name,"
" CASE"
" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
" ELSE short_name"
" END AS dso,"
" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
" CASE"
" WHEN (calls.flags = 1) THEN 'no call'" + text +
" WHEN (calls.flags = 2) THEN 'no return'" + text +
" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
" ELSE ''" + text +
" END AS flags"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" INNER JOIN comms ON calls.comm_id = comms.id"
" INNER JOIN threads ON calls.thread_id = threads.id" +
report_vars.where_clause +
" ORDER BY elapsed_time DESC" +
limit
)
column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
def columnAlignment(self, column):
return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Top Calls by Elapsed Time"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, report_vars, parent=None):
super(TopCallsWindow, self).__init__(parent)
self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
self.model = self.data_model
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.context_menu = ContextMenu(self.view)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
action = QAction(label, parent)
if shortcut != None:
action.setShortcuts(shortcut)
action.setStatusTip(tip)
action.triggered.connect(callback)
return action
# Typical application actions
def CreateExitAction(app, parent=None):
return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
def CreateCloseAllWindowsAction(mdi_area):
return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
def CreateTileWindowsAction(mdi_area):
return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
def CreateCascadeWindowsAction(mdi_area):
return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
def CreateNextWindowAction(mdi_area):
return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
def CreatePreviousWindowAction(mdi_area):
return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
def __init__(self, mdi_area, menu):
self.mdi_area = mdi_area
self.window_menu = menu.addMenu("&Windows")
self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
self.tile_windows = CreateTileWindowsAction(mdi_area)
self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
self.next_window = CreateNextWindowAction(mdi_area)
self.previous_window = CreatePreviousWindowAction(mdi_area)
self.window_menu.aboutToShow.connect(self.Update)
def Update(self):
self.window_menu.clear()
sub_window_count = len(self.mdi_area.subWindowList())
have_sub_windows = sub_window_count != 0
self.close_active_window.setEnabled(have_sub_windows)
self.close_all_windows.setEnabled(have_sub_windows)
self.tile_windows.setEnabled(have_sub_windows)
self.cascade_windows.setEnabled(have_sub_windows)
self.next_window.setEnabled(have_sub_windows)
self.previous_window.setEnabled(have_sub_windows)
self.window_menu.addAction(self.close_active_window)
self.window_menu.addAction(self.close_all_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.tile_windows)
self.window_menu.addAction(self.cascade_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.next_window)
self.window_menu.addAction(self.previous_window)
if sub_window_count == 0:
return
self.window_menu.addSeparator()
nr = 1
for sub_window in self.mdi_area.subWindowList():
label = str(nr) + " " + sub_window.name
if nr < 10:
label = "&" + label
action = self.window_menu.addAction(label)
action.setCheckable(True)
action.setChecked(sub_window == self.mdi_area.activeSubWindow())
action.triggered.connect(lambda x=nr: self.setActiveSubWindow(x))
self.window_menu.addAction(action)
nr += 1
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
Call Graph: pt_example
Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
v- ls
v- 2638:2638
v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
|- unknown unknown 1 13198 0.1 1 0.0
>- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
>- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
>- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
>- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
>- __libc_csu_init ls 1 10354 0.1 10 0.0
|- _setjmp libc-2.19.so 1 0 0.0 4 0.0
v- main ls 1 8182043 99.6 180254 99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore where the DSO long name is used (refer to dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of the trace. Examples:
<pre>
81073085947329-81073085958238 From 81073085947329 to 81073085958238
100us-200us From 100us to 200us
10ms- From 10ms to the end
-100ns The first 100ns
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
def __init__(self, glb, parent=None):
super(HelpWindow, self).__init__(parent)
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setWidget(self.text)
AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
def __init__(self, parent=None):
super(HelpOnlyWindow, self).__init__(parent)
self.setMinimumSize(200, 100)
self.resize(800, 600)
self.setWindowTitle("Exported SQL Viewer Help")
self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setCentralWidget(self.text)
# PostgreSQL server version
def PostgreSQLServerVersion(db):
query = QSqlQuery(db)
QueryExec(query, "SELECT VERSION()")
if query.next():
v_str = query.value(0)
v_list = v_str.strip().split(" ")
if v_list[0] == "PostgreSQL" and v_list[2] == "on":
return v_list[1]
return v_str
return "Unknown"
# SQLite version
def SQLiteVersion(db):
query = QSqlQuery(db)
QueryExec(query, "SELECT sqlite_version()")
if query.next():
return query.value(0)
return "Unknown"
# About dialog
class AboutDialog(QDialog):
def __init__(self, glb, parent=None):
super(AboutDialog, self).__init__(parent)
self.setWindowTitle("About Exported SQL Viewer")
self.setMinimumWidth(300)
pyside_version = "1" if pyside_version_1 else "2"
text = "<pre>"
text += "Python version: " + sys.version.split(" ")[0] + "\n"
text += "PySide version: " + pyside_version + "\n"
text += "Qt version: " + qVersion() + "\n"
if glb.dbref.is_sqlite3:
text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
else:
text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
text += "</pre>"
self.text = QTextBrowser()
self.text.setHtml(text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.text)
		self.setLayout(self.vbox)
# Font resize
def ResizeFont(widget, diff):
font = widget.font()
sz = font.pointSize()
font.setPointSize(sz + diff)
widget.setFont(font)
def ShrinkFont(widget):
ResizeFont(widget, -1)
def EnlargeFont(widget):
ResizeFont(widget, 1)
# Unique name for sub-windows
def NumberedWindowName(name, nr):
if nr > 1:
name += " <" + str(nr) + ">"
return name
def UniqueSubWindowName(mdi_area, name):
nr = 1
while True:
unique_name = NumberedWindowName(name, nr)
ok = True
for sub_window in mdi_area.subWindowList():
if sub_window.name == unique_name:
ok = False
break
if ok:
return unique_name
nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
unique_name = UniqueSubWindowName(mdi_area, name)
sub_window.setMinimumSize(200, 100)
sub_window.resize(800, 600)
sub_window.setWindowTitle(unique_name)
sub_window.setAttribute(Qt.WA_DeleteOnClose)
sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
sub_window.name = unique_name
mdi_area.addSubWindow(sub_window)
sub_window.show()
# Main window
class MainWindow(QMainWindow):
def __init__(self, glb, parent=None):
super(MainWindow, self).__init__(parent)
self.glb = glb
self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
self.setMinimumSize(200, 100)
self.mdi_area = QMdiArea()
self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdi_area)
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_menu.addAction(CreateExitAction(glb.app, self))
edit_menu = menu.addMenu("&Edit")
edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
help_menu = menu.addMenu("&Help")
help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
def Try(self, fn):
win = self.mdi_area.activeSubWindow()
if win:
try:
fn(win.view)
except:
pass
def CopyToClipboard(self):
self.Try(CopyCellsToClipboardHdr)
def CopyToClipboardCSV(self):
self.Try(CopyCellsToClipboardCSV)
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.find_bar.Activate()
except:
pass
def FetchMoreRecords(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.fetch_bar.Activate()
except:
pass
def ShrinkFont(self):
self.Try(ShrinkFont)
def EnlargeFont(self):
self.Try(EnlargeFont)
def EventMenu(self, events, reports_menu):
branches_events = 0
for event in events:
event = event.split(":")[0]
if event == "branches":
branches_events += 1
dbid = 0
for event in events:
dbid += 1
event = event.split(":")[0]
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
for table in tables:
table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda t=table: self.NewTableView(t), self))
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
def NewCallTree(self):
CallTreeWindow(self.glb, self)
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
if ret:
TopCallsWindow(self.glb, dialog.report_vars, self)
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
def Help(self):
HelpWindow(self.glb, self)
def About(self):
dialog = AboutDialog(self.glb, self)
dialog.exec_()
# XED Disassembler
class xed_state_t(Structure):
_fields_ = [
("mode", c_int),
("width", c_int)
]
class XEDInstruction():
def __init__(self, libxed):
# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
xedd_t = c_byte * 512
self.xedd = xedd_t()
self.xedp = addressof(self.xedd)
libxed.xed_decoded_inst_zero(self.xedp)
self.state = xed_state_t()
self.statep = addressof(self.state)
# Buffer for disassembled instruction text
self.buffer = create_string_buffer(256)
self.bufferp = addressof(self.buffer)
class LibXED():
def __init__(self):
try:
self.libxed = CDLL("libxed.so")
except:
self.libxed = None
if not self.libxed:
self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
self.xed_tables_init.argtypes = []
self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
self.xed_decoded_inst_zero.restype = None
self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
self.xed_operand_values_set_mode.restype = None
self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
self.xed_decoded_inst_zero_keep_mode.restype = None
self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
self.xed_decode = self.libxed.xed_decode
self.xed_decode.restype = c_int
self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
self.xed_format_context = self.libxed.xed_format_context
self.xed_format_context.restype = c_uint
self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
self.xed_tables_init()
def Instruction(self):
return XEDInstruction(self)
def SetMode(self, inst, mode):
if mode:
inst.state.mode = 4 # 32-bit
inst.state.width = 4 # 4 bytes
else:
inst.state.mode = 1 # 64-bit
inst.state.width = 8 # 8 bytes
self.xed_operand_values_set_mode(inst.xedp, inst.statep)
def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
self.xed_decoded_inst_zero_keep_mode(inst.xedp)
err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
if err:
return 0, ""
# Use AT&T mode (2), alternative is Intel (3)
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
if sys.version_info[0] == 2:
result = inst.buffer.value
else:
result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
return inst.xedd[166], result
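# Illustrative sketch (hypothetical helper, not called by the viewer): decode a
# single 64-bit instruction using the LibXED wrappers above. Assumes libxed.so
# is installed as described in the help text.
def ExampleDisassembleNop():
	libxed = LibXED()
	inst = libxed.Instruction()
	libxed.SetMode(inst, 0)  # 0 selects 64-bit mode (see SetMode() above)
	code = create_string_buffer(b"\x90")  # 0x90 is the x86 'nop' instruction
	# Returns the instruction length and the disassembled text e.g. (1, "nop")
	return libxed.DisassembleOne(inst, addressof(code), 1, 0x400000)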
def TryOpen(file_name):
try:
return open(file_name, "rb")
except:
return None
def Is64Bit(f):
result = sizeof(c_void_p)
# ELF support only
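	# The first 7 bytes of the ELF identification are: magic (4 bytes),
	# EI_CLASS (1 = 32-bit, 2 = 64-bit), EI_DATA (byte order) and EI_VERSION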
pos = f.tell()
f.seek(0)
header = f.read(7)
f.seek(pos)
magic = header[0:4]
if sys.version_info[0] == 2:
eclass = ord(header[4])
encoding = ord(header[5])
version = ord(header[6])
else:
eclass = header[4]
encoding = header[5]
version = header[6]
	# header is bytes under Python 3, so compare against a bytes literal
	# (which is equivalently a str under Python 2)
	if magic == b'\x7fELF' and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
# Global data
class Glb():
def __init__(self, dbref, db, dbname):
self.dbref = dbref
self.db = db
self.dbname = dbname
self.home_dir = os.path.expanduser("~")
self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
if self.buildid_dir:
self.buildid_dir += "/.build-id/"
else:
self.buildid_dir = self.home_dir + "/.debug/.build-id/"
self.app = None
self.mainwindow = None
self.instances_to_shutdown_on_exit = weakref.WeakSet()
try:
self.disassembler = LibXED()
self.have_disassembler = True
except:
self.have_disassembler = False
def FileFromBuildId(self, build_id):
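		# perf's build-id cache stores each file as
		# .build-id/<first 2 hex chars>/<remaining hex chars>/elf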
file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
return TryOpen(file_name)
def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
# Assume current machine i.e. no support for virtualization
if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
file_name = os.getenv("PERF_KCORE")
f = TryOpen(file_name) if file_name else None
if f:
return f
# For now, no special handling if long_name is /proc/kcore
f = TryOpen(long_name)
if f:
return f
f = self.FileFromBuildId(build_id)
if f:
return f
return None
def AddInstanceToShutdownOnExit(self, instance):
self.instances_to_shutdown_on_exit.add(instance)
# Shutdown any background processes or threads
def ShutdownInstances(self):
for x in self.instances_to_shutdown_on_exit:
try:
x.Shutdown()
except:
pass
# Database reference
class DBRef():
def __init__(self, is_sqlite3, dbname):
self.is_sqlite3 = is_sqlite3
self.dbname = dbname
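	# For PostgreSQL, dbname may be a space-separated option string, e.g.
	# (illustrative values):
	#	"hostname=myhost port=5432 username=me password=pw dbname=pt_example"
	# Any bare token without "=" is taken as the database name itself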
def Open(self, connection_name):
dbname = self.dbname
if self.is_sqlite3:
db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
else:
db = QSqlDatabase.addDatabase("QPSQL", connection_name)
opts = dbname.split()
for opt in opts:
if "=" in opt:
opt = opt.split("=")
if opt[0] == "hostname":
db.setHostName(opt[1])
elif opt[0] == "port":
db.setPort(int(opt[1]))
elif opt[0] == "username":
db.setUserName(opt[1])
elif opt[0] == "password":
db.setPassword(opt[1])
elif opt[0] == "dbname":
dbname = opt[1]
else:
dbname = opt
db.setDatabaseName(dbname)
if not db.open():
raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
return db, dbname
# Main
def Main():
	if len(sys.argv) < 2:
		printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}")
raise Exception("Too few arguments")
dbname = sys.argv[1]
if dbname == "--help-only":
app = QApplication(sys.argv)
mainwindow = HelpOnlyWindow()
mainwindow.show()
err = app.exec_()
sys.exit(err)
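	# An SQLite3 database file begins with the 16-byte header string
	# "SQLite format 3\0", so reading the first 15 bytes identifies it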
is_sqlite3 = False
try:
f = open(dbname, "rb")
if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
pass
dbref = DBRef(is_sqlite3, dbname)
db, dbname = dbref.Open("main")
glb = Glb(dbref, db, dbname)
app = QApplication(sys.argv)
glb.app = app
mainwindow = MainWindow(glb)
glb.mainwindow = mainwindow
mainwindow.show()
err = app.exec_()
glb.ShutdownInstances()
db.close()
sys.exit(err)
if __name__ == "__main__":
Main()
|
gpl-2.0
|