repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---
drawks/ansible | lib/ansible/modules/cloud/google/gcp_redis_instance_facts.py | 12 | 7557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_redis_instance_facts
description:
- Gather facts for GCP Instance
short_description: Gather facts for GCP Instance
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
region:
description:
- The name of the Redis region of the instance.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a instance facts"
gcp_redis_instance_facts:
region: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
alternativeLocationId:
description:
- Only applicable to STANDARD_HA tier which protects the instance against zonal
failures by provisioning it across two zones.
- If provided, it must be a different zone from the one provided in [locationId].
returned: success
type: str
authorizedNetwork:
description:
- The full name of the Google Compute Engine network to which the instance is
connected. If left unspecified, the default network will be used.
returned: success
type: str
createTime:
description:
- The time the instance was created in RFC3339 UTC "Zulu" format, accurate to
nanoseconds.
returned: success
type: str
currentLocationId:
description:
- The current zone where the Redis endpoint is placed.
- For Basic Tier instances, this will always be the same as the [locationId]
provided by the user at creation time. For Standard Tier instances, this can
be either [locationId] or [alternativeLocationId] and can change after a failover
event.
returned: success
type: str
displayName:
description:
- An arbitrary and optional user-provided name for the instance.
returned: success
type: str
host:
description:
- Hostname or IP address of the exposed Redis endpoint used by clients to connect
to the service.
returned: success
type: str
labels:
description:
- Resource labels to represent user provided metadata.
returned: success
type: dict
redisConfigs:
description:
- Redis configuration parameters, according to U(http://redis.io/topics/config).
- 'Please check the Memorystore documentation for the list of supported parameters:
U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs).'
returned: success
type: dict
locationId:
description:
- The zone where the instance will be provisioned. If not provided, the service
will choose a zone for the instance. For STANDARD_HA tier, instances will
be created across two zones for protection against zonal failures. If [alternativeLocationId]
is also provided, it must be different from [locationId].
returned: success
type: str
name:
description:
- The ID of the instance or a fully qualified identifier for the instance.
returned: success
type: str
memorySizeGb:
description:
- Redis memory size in GiB.
returned: success
type: int
port:
description:
- The port number of the exposed Redis endpoint.
returned: success
type: int
redisVersion:
description:
- The version of Redis software. If not provided, latest supported version will
be used. Updating the version will perform an upgrade/downgrade to the new
version. Currently, the supported values are REDIS_3_2 for Redis 3.2.
returned: success
type: str
reservedIpRange:
description:
- The CIDR range of internal addresses that are reserved for this instance.
If not provided, the service will choose an unused /29 block, for example,
10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with
existing subnets in an authorized network.
returned: success
type: str
tier:
description:
- 'The service tier of the instance. Must be one of these values: - BASIC: standalone
instance - STANDARD_HA: highly available primary/replica instances.'
returned: success
type: str
region:
description:
- The name of the Redis region of the instance.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(region=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('instances'):
items = items.get('instances')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'redis')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
TangXT/edx-platform | common/djangoapps/user_api/models.py | 24 | 1900 | from django.contrib.auth.models import User
from django.core.validators import RegexValidator
from django.db import models
from xmodule_django.models import CourseKeyField
class UserPreference(models.Model):
"""A user's preference, stored as generic text to be processed by client"""
KEY_REGEX = r"[-_a-zA-Z0-9]+"
user = models.ForeignKey(User, db_index=True, related_name="preferences")
key = models.CharField(max_length=255, db_index=True, validators=[RegexValidator(KEY_REGEX)])
value = models.TextField()
class Meta: # pylint: disable=missing-docstring
unique_together = ("user", "key")
@classmethod
def set_preference(cls, user, preference_key, preference_value):
"""
Sets the user preference for a given key
"""
user_pref, _ = cls.objects.get_or_create(user=user, key=preference_key)
user_pref.value = preference_value
user_pref.save()
@classmethod
def get_preference(cls, user, preference_key, default=None):
"""
Gets the user preference value for a given key
Returns the given default if there isn't a preference for the given key
"""
try:
user_pref = cls.objects.get(user=user, key=preference_key)
return user_pref.value
except cls.DoesNotExist:
return default
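# Illustrative usage (editorial note, not part of the original module; the key name is hypothetical):
#   UserPreference.set_preference(user, "pref-lang", "en")
#   UserPreference.get_preference(user, "pref-lang", default="en-US")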
class UserCourseTag(models.Model):
"""
Per-course user tags, to be used by various things that want to store tags about
the user. Added initially to store assignment to experimental groups.
"""
user = models.ForeignKey(User, db_index=True, related_name="+")
key = models.CharField(max_length=255, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
value = models.TextField()
class Meta: # pylint: disable=missing-docstring
unique_together = ("user", "course_id", "key")
| agpl-3.0 |
tastynoodle/django | tests/i18n/contenttypes/tests.py | 9 | 1181 | # coding: utf-8
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
from django.utils._os import upath
from django.utils import six
from django.utils import translation
from i18n import TransRealMixin
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
LANGUAGE_CODE='en',
LANGUAGES=(
('en', 'English'),
('fr', 'French'),
),
)
class ContentTypeTests(TransRealMixin, TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(six.text_type(company_type), 'Company')
with translation.override('fr'):
self.assertEqual(six.text_type(company_type), 'Société')
def test_field_override(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
company_type.name = 'Other'
self.assertEqual(six.text_type(company_type), 'Other')
| bsd-3-clause |
snowflakedb/snowflake-connector-python | src/snowflake/connector/vendored/urllib3/util/__init__.py | 27 | 1155 | from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
from .response import is_fp_closed
from .retry import Retry
from .ssl_ import (
ALPN_PROTOCOLS,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
PROTOCOL_TLS,
SSLContext,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import Timeout, current_time
from .url import Url, get_host, parse_url, split_first
from .wait import wait_for_read, wait_for_write
__all__ = (
"HAS_SNI",
"IS_PYOPENSSL",
"IS_SECURETRANSPORT",
"SSLContext",
"PROTOCOL_TLS",
"ALPN_PROTOCOLS",
"Retry",
"Timeout",
"Url",
"assert_fingerprint",
"current_time",
"is_connection_dropped",
"is_fp_closed",
"get_host",
"parse_url",
"make_headers",
"resolve_cert_reqs",
"resolve_ssl_version",
"split_first",
"ssl_wrap_socket",
"wait_for_read",
"wait_for_write",
"SKIP_HEADER",
"SKIPPABLE_HEADERS",
)
| apache-2.0 |
stevegt/UltimakerUtils | leveling-rings-UM1.py | 1 | 2681 | #!/usr/bin/python
# Derived from the UM2 version by an anonymous contributor...
#
# http://umforum.ultimaker.com/index.php?/topic/5951-um2-calibration-utility-leveling-ringsgcode/?p=54694
#
# ...who wisely says: "I accept NO liability for any damage done by
# using either version or any derivatives. USE AT YOUR OWN RISK."
filament_diameter = 2.89
build_area_width = 205.0
build_area_depth = 205.0
rings = 10
wide = 0.4
thick = 0.2925 / 2
temperature = 230
bed_temperature = 60
base_dia = 180
pi=3.1415927
center_x = build_area_width/2.0
center_y = build_area_depth/2.0
filament_area = (filament_diameter / 2) ** 2 * pi
head = '''
M107 ;start with the fan off
G21 ;metric values
G90 ;absolute positioning
M82 ;set extruder to absolute mode
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F9000 ;move the platform down 15mm
M140 S{bed_temperature:.2f} ;set bed temp (no wait)
M109 T0 S{temperature:.2f} ;set extruder temp (wait)
M190 S{bed_temperature:.2f} ;set bed temp (wait)
G92 E0 ;zero the extruded length
G1 F200 E3 ;extrude 3mm of feed stock
G92 E0 ;zero the extruded length again
G1 F9000 ;set speed to 9000
;Put printing message on LCD screen
M117 Printing...
;Layer count: 1
;LAYER:0
'''
loop = '''
G0 F9000 X{x:.2f} Y{y:.2f} Z{z:.2f}
G2 F1000 X{x:.2f} Y{y:.2f} I{r:.2f} E{total_mm3:.2f}'''
tail = '''
;End GCode
M104 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F9000 ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning'''
total_mm3 = 0
body = ''
cross_section = thick * wide
z = thick
for i in range(rings):
dia = base_dia - ((wide * 2) * i)
circumference = pi * dia
r = dia/2.0;
x = center_x - r
y = center_y
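# Editorial note (not in the original script): circumference * cross_section is the
# ring's plastic volume in mm^3; dividing by the filament's cross-sectional area
# converts it to the length of filament in mm, which is what the absolute E values
# in the generated G-code expect (despite the "mm3" variable names).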
mm3 = (circumference * cross_section) / filament_area
total_mm3 += mm3
body += loop.format(**vars())
print head.format(**vars())
print body
print tail.format(**vars())
| gpl-2.0 |
biswajitsahu/kuma | vendor/packages/git/diff.py | 32 | 2669 | # diff.py
# Copyright (C) 2008-2010 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
import commit
class Diff(object):
"""
A Diff contains diff information between two commits.
"""
def __init__(self, repo, a_path, b_path, a_commit, b_commit, a_mode,
b_mode, new_file, deleted_file, rename_from,
rename_to, diff):
self.repo = repo
self.a_path = a_path
self.b_path = b_path
if not a_commit or re.search(r'^0{40}$', a_commit):
self.a_commit = None
else:
self.a_commit = commit.Commit(repo, id=a_commit)
if not b_commit or re.search(r'^0{40}$', b_commit):
self.b_commit = None
else:
self.b_commit = commit.Commit(repo, id=b_commit)
self.a_mode = a_mode
self.b_mode = b_mode
self.new_file = new_file
self.deleted_file = deleted_file
self.rename_from = rename_from
self.rename_to = rename_to
self.renamed = rename_from != rename_to
self.diff = diff
@classmethod
def list_from_string(cls, repo, text):
diffs = []
diff_header = re.compile(r"""
#^diff[ ]--git
[ ]a/(?P<a_path>\S+)[ ]b/(?P<b_path>\S+)\n
(?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
^rename[ ]from[ ](?P<rename_from>\S+)\n
^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
(?:^old[ ]mode[ ](?P<old_mode>\d+)\n
^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
(?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
(?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
(?:^index[ ](?P<a_commit>[0-9A-Fa-f]+)
\.\.(?P<b_commit>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
""", re.VERBOSE | re.MULTILINE).match
for diff in ('\n' + text).split('\ndiff --git')[1:]:
header = diff_header(diff)
a_path, b_path, similarity_index, rename_from, rename_to, \
old_mode, new_mode, new_file_mode, deleted_file_mode, \
a_commit, b_commit, b_mode = header.groups()
new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode)
diffs.append(Diff(repo, a_path, b_path, a_commit, b_commit,
old_mode or deleted_file_mode, new_mode or new_file_mode or b_mode,
new_file, deleted_file, rename_from, rename_to, diff[header.end():]))
return diffs
| mpl-2.0 |
galtys/odoo | setup.py | 37 | 5624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
from setuptools import find_packages, setup
from os.path import join, dirname
execfile(join(dirname(__file__), 'openerp', 'release.py')) # Load release variables
lib_name = 'openerp'
def py2exe_datafiles():
data_files = {}
data_files['Microsoft.VC90.CRT'] = glob('C:\Microsoft.VC90.CRT\*.*')
for root, dirnames, filenames in os.walk('openerp'):
for filename in filenames:
if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
data_files.setdefault(root, []).append(join(root, filename))
import babel
data_files['babel/localedata'] = glob(join(dirname(babel.__file__), 'localedata', '*'))
others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']
data_files['babel'] = map(lambda f: join(dirname(babel.__file__), f), others)
others = ['frontend.py', 'mofile.py']
data_files['babel/messages'] = map(lambda f: join(dirname(babel.__file__), 'messages', f), others)
import pytz
tzdir = dirname(pytz.__file__)
for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')):
base = join('pytz', root[len(tzdir) + 1:])
data_files[base] = [join(root, f) for f in filenames]
import docutils
dudir = dirname(docutils.__file__)
for root, _, filenames in os.walk(dudir):
base = join('docutils', root[len(dudir) + 1:])
data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]
import passlib
pl = dirname(passlib.__file__)
for root, _, filenames in os.walk(pl):
base = join('passlib', root[len(pl) + 1:])
data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]
return data_files.items()
def py2exe_options():
if os.name == 'nt':
import py2exe
return {
'console': [
{'script': 'odoo.py'},
{'script': 'openerp-gevent'},
{'script': 'openerp-server', 'icon_resources': [
(1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico'))
]},
],
'options': {
'py2exe': {
'skip_archive': 1,
'optimize': 0, # Keep the assert running as the integrated tests rely on them.
'dist_dir': 'dist',
'packages': [
'asynchat', 'asyncore',
'commands',
'dateutil',
'decimal',
'decorator',
'docutils',
'email',
'encodings',
'HTMLParser',
'imaplib',
'jinja2',
'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify',
'mako',
'markupsafe',
'mock',
'openerp',
'openid',
'passlib',
'PIL',
'poplib',
'psutil',
'pychart',
'pydot',
'pyparsing',
'pyPdf',
'pytz',
'reportlab',
'requests',
'select',
'simplejson',
'smtplib',
'uuid',
'vatnumber',
'vobject',
'win32service', 'win32serviceutil',
'xlwt',
'xml', 'xml.dom',
'yaml',
],
'excludes': ['Tkconstants', 'Tkinter', 'tcl'],
}
},
'data_files': py2exe_datafiles()
}
else:
return {}
setup(
name='odoo',
version=version,
description=description,
long_description=long_desc,
url=url,
author=author,
author_email=author_email,
classifiers=filter(None, classifiers.split('\n')),
license=license,
scripts=['openerp-server', 'openerp-gevent', 'odoo.py'],
packages=find_packages(),
package_dir={'%s' % lib_name: 'openerp'},
include_package_data=True,
install_requires=[
'babel >= 1.0',
'decorator',
'docutils',
'feedparser',
'gevent',
'Jinja2',
'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'mako',
'mock',
'passlib',
'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'psutil', # windows binary code.google.com/p/psutil/downloads/list
'psycogreen',
'psycopg2 >= 2.2',
'python-chart',
'pydot',
'pyparsing',
'pypdf',
'pyserial',
'python-dateutil',
'python-ldap', # optional
'python-openid',
'pytz',
'pyusb >= 1.0.0b1',
'pyyaml',
'qrcode',
'reportlab', # windows binary pypi.python.org/pypi/reportlab
'requests',
'simplejson',
'unittest2',
'vatnumber',
'vobject',
'werkzeug',
'xlwt',
],
extras_require={
'SSL': ['pyopenssl'],
},
tests_require=[
'unittest2',
'mock',
],
**py2exe_options()
)
| agpl-3.0 |
leviroth/praw | praw/models/base.py | 6 | 1256 | """Provide the PRAWBase superclass."""
from copy import deepcopy
class PRAWBase(object):
"""Superclass for all models in PRAW."""
@staticmethod
def _safely_add_arguments(argument_dict, key, **new_arguments):
"""Replace argument_dict[key] with a deepcopy and update.
This method is often called when new parameters need to be added to a
request. By calling this method and adding the new or updated
parameters we can ensure we don't modify the dictionary passed in by
the caller.
"""
value = deepcopy(argument_dict[key]) if key in argument_dict else {}
value.update(new_arguments)
argument_dict[key] = value
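# Illustrative usage (editorial note, not part of the original module):
#   data = {"json": {"api_type": "json"}}
#   PRAWBase._safely_add_arguments(data, "json", text="hello")
#   # data["json"] now maps to a new dict {"api_type": "json", "text": "hello"};
#   # the dict object originally supplied by the caller is left unmodified.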
@classmethod
def parse(cls, data, reddit):
"""Return an instance of ``cls`` from ``data``.
:param data: The structured data.
:param reddit: An instance of :class:`.Reddit`.
"""
return cls(reddit, _data=data)
def __init__(self, reddit, _data):
"""Initialize a PRAWModel instance.
:param reddit: An instance of :class:`.Reddit`.
"""
self._reddit = reddit
if _data:
for attribute, value in _data.items():
setattr(self, attribute, value)
| bsd-2-clause |
KosiehBarter/anaconda | pyanaconda/pwpolicy.py | 10 | 4777 | #
# Brian C. Lane <[email protected]>
#
# Copyright 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
import warnings
from pyanaconda.i18n import _
class F22_PwPolicyData(BaseData):
""" Kickstart Data object to hold information about pwpolicy. """
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.minlen = kwargs.get("minlen", 8)
self.minquality = kwargs.get("minquality", 50)
self.strict = kwargs.get("strict", True)
self.changesok = kwargs.get("changesok", False)
self.emptyok = kwargs.get("emptyok", True)
def __eq__(self, y):
if not y:
return False
return self.name == y.name
def __ne__(self, y):
return not self == y
def __str__(self):
retval = BaseData.__str__(self)
if self.name != "":
retval += "pwpolicy"
retval += self._getArgsAsStr() + "\n"
return retval
def _getArgsAsStr(self):
retval = ""
retval += " %s" % self.name
retval += " --minlen=%d" % self.minlen
retval += " --minquality=%d" % self.minquality
if self.strict:
retval += " --strict"
else:
retval += " --notstrict"
if self.changesok:
retval += " --changesok"
else:
retval += " --nochanges"
if self.emptyok:
retval += " --emptyok"
else:
retval += " --notempty"
return retval
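# Illustrative note (not in the original file): with the defaults above, str() of a
# policy named "user" renders a kickstart line such as
#   pwpolicy user --minlen=8 --minquality=50 --strict --nochanges --emptyok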
class F22_PwPolicy(KickstartCommand):
""" Kickstart command implementing password policy. """
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.policyList = kwargs.get("policyList", [])
def __str__(self):
retval = ""
for policy in self.policyList:
retval += policy.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--minlen", type="int")
op.add_option("--minquality", type="int")
op.add_option("--strict", action="store_true")
op.add_option("--notstrict", dest="strict", action="store_false")
op.add_option("--changesok", action="store_true")
op.add_option("--nochanges", dest="changesok", action="store_false")
op.add_option("--emptyok", action="store_true")
op.add_option("--notempty", dest="emptyok", action="store_false")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("policy name required for %s") % "pwpolicy"))
pd = self.handler.PwPolicyData()
self._setToObj(self.op, opts, pd)
pd.lineno = self.lineno
pd.name = extra[0]
# Check for duplicates in the data list.
if pd in self.dataList():
warnings.warn(_("A %(command)s with the name %(policyName)s has already been defined.") % {"command": "pwpolicy", "policyName": pd.name})
return pd
def dataList(self):
return self.policyList
def get_policy(self, name):
""" Get the policy by name
:param str name: Name of the policy to return.
"""
policy = [p for p in self.policyList if p.name == name]
if policy:
return policy[0]
else:
return None
| gpl-2.0 |
udapi/udapi-python | udapi/block/ud/complywithtext.py | 1 | 11648 | r"""Block ComplyWithText for adapting the nodes to comply with the text.
Implementation design details:
Usually, most of the inconsistencies between tree tokens and the raw text are simple to solve.
However, there may be also rare cases when it is not clear how to align the tokens
(nodes in the tree) with the raw text (stored in ``root.text``).
This block tries to solve the general case using several heuristics.
It starts with running a LCS-like algorithm (LCS = longest common subsequence)
``difflib.SequenceMatcher`` on the raw text and concatenation of tokens' forms,
i.e. on sequences of characters (as opposed to running LCS on sequences of tokens).
To prevent mis-alignment problems, we keep the spaces present in the raw text
and we insert spaces into the concatenated forms (``tree_chars``) according to ``SpaceAfter=No``.
An example of a mis-alignment problem:
text "énfase na necesidade" with 4 nodes "énfase en a necesidade"
should be solved by adding multiword token "na" over the nodes "en" and "a".
However, running LCS (or difflib) over the character sequences
"énfaseenanecesidade"
"énfasenanecesidade"
may result in énfase -> énfas.
Author: Martin Popel
"""
import difflib
import logging
import re
from udapi.core.block import Block
from udapi.core.mwt import MWT
class ComplyWithText(Block):
"""Adapt the nodes to comply with the text."""
def __init__(self, fix_text=True, prefer_mwt=True, allow_goeswith=True, max_mwt_length=4,
**kwargs):
"""Args:
fix_text: After all heuristics are applied, the token forms may still not match the text.
Should we edit the text to match the token forms (as a last resort)? Default=True.
prefer_mwt - What to do if multiple subsequent nodes correspond to a text written
without spaces and non-word characters (punctuation)?
E.g. if "3pm doesn't" is annotated with four nodes "3 pm does n't".
We can use either SpaceAfter=No, or create a multi-word token (MWT).
Note that if there is space or punctuation, SpaceAfter=No will be used always
(e.g. "3 p.m." annotated with three nodes "3 p. m.").
If the character sequence does not match exactly, MWT will be used always
(e.g. "3pm doesn't" annotated with four nodes "3 p.m. does not").
Thus this parameter influences only the "unclear" cases.
Default=True (i.e. prefer multi-word tokens over SpaceAfter=No).
allow_goeswith - If a node corresponds to multiple space-separated strings in text,
which are not allowed as tokens with space, we can either leave this diff
unresolved or create new nodes and join them with the `goeswith` deprel.
Default=True (i.e. add the goeswith nodes if applicable).
max_mwt_length - Maximum length of newly created multi-word tokens (in syntactic words).
Default=4.
"""
super().__init__(**kwargs)
self.fix_text = fix_text
self.prefer_mwt = prefer_mwt
self.allow_goeswith = allow_goeswith
self.max_mwt_length = max_mwt_length
@staticmethod
def allow_space(form):
"""Is space allowed within this token form?"""
return re.fullmatch('[0-9 ]+([,.][0-9]+)?', form)
@staticmethod
def store_orig_form(node, new_form):
"""Store the original form of this node into MISC, unless the change is common&expected."""
_ = new_form
if node.form not in ("''", "``"):
node.misc['OrigForm'] = node.form
def process_tree(self, root):
text = root.text
if text is None:
raise ValueError('Tree %s has no text, cannot use ud.ComplyWithText' % root)
# Normalize the stored text (double space -> single space)
# and skip sentences which are already ok.
text = ' '.join(text.split())
if text == root.compute_text():
return
tree_chars, char_nodes = _nodes_to_chars(root.token_descendants)
# Align. difflib may not give LCS, but usually it is good enough.
matcher = difflib.SequenceMatcher(None, tree_chars, text, autojunk=False)
diffs = list(matcher.get_opcodes())
_log_diffs(diffs, tree_chars, text, 'matcher')
diffs = self.unspace_diffs(diffs, tree_chars, text)
_log_diffs(diffs, tree_chars, text, 'unspace')
diffs = self.merge_diffs(diffs, char_nodes)
_log_diffs(diffs, tree_chars, text, 'merge')
# Solve diffs.
self.solve_diffs(diffs, tree_chars, char_nodes, text)
# Fill SpaceAfter=No.
tmp_text = text
for node in root.token_descendants:
if tmp_text.startswith(node.form):
tmp_text = tmp_text[len(node.form):]
if not tmp_text or tmp_text[0].isspace():
del node.misc['SpaceAfter']
tmp_text = tmp_text.lstrip()
else:
node.misc['SpaceAfter'] = 'No'
else:
logging.warning('Node %s does not match text "%s"', node, tmp_text[:20])
return
# Edit root.text if needed.
if self.fix_text:
computed_text = root.compute_text()
if text != computed_text:
root.add_comment('ToDoOrigText = ' + root.text)
root.text = computed_text
def unspace_diffs(self, orig_diffs, tree_chars, text):
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert':
if tree_chars[tree_lo] == ' ':
tree_lo += 1
if tree_chars[tree_hi - 1] == ' ':
tree_hi -= 1
old = tree_chars[tree_lo:tree_hi]
new = text[text_lo:text_hi]
if old == '' and new == '':
continue
elif old == new:
edit = 'equal'
elif old == '':
edit = 'insert'
diffs.append((edit, tree_lo, tree_hi, text_lo, text_hi))
return diffs
def merge_diffs(self, orig_diffs, char_nodes):
"""Make sure each diff starts on original token boundary.
If not, merge the diff with the previous diff.
E.g. (equal, "5", "5"), (replace, "-6", "–7")
is changed into (replace, "5-6", "5–7")
"""
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert' and char_nodes[tree_lo] is not None:
diffs.append(diff)
elif edit == 'equal':
while tree_lo < tree_hi and char_nodes[tree_lo] is None:
tree_lo += 1
text_lo += 1
diffs[-1] = ('replace', diffs[-1][1], tree_lo, diffs[-1][3], text_lo)
if tree_lo < tree_hi:
diffs.append(('equal', tree_lo, tree_hi, text_lo, text_hi))
else:
if not diffs:
diffs = [diff]
elif diffs[-1][0] != 'equal':
diffs[-1] = ('replace', diffs[-1][1], tree_hi, diffs[-1][3], text_hi)
else:
p_tree_hi = diffs[-1][2] - 1
p_text_hi = diffs[-1][4] - 1
while char_nodes[p_tree_hi] is None:
p_tree_hi -= 1
p_text_hi -= 1
assert p_tree_hi >= diffs[-1][1]
assert p_text_hi >= diffs[-1][3]
diffs[-1] = ('equal', diffs[-1][1], p_tree_hi, diffs[-1][3], p_text_hi)
diffs.append(('replace', p_tree_hi, tree_hi, p_text_hi, text_hi))
return diffs
def solve_diffs(self, diffs, tree_chars, char_nodes, text):
for diff in diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
# Focus only on edits of type 'replace', log insertions and deletions as failures.
if edit == 'equal':
continue
if edit in ('insert', 'delete'):
logging.warning('Unable to solve token-vs-text mismatch\n%s',
_diff2str(diff, tree_chars, text))
continue
# Revert the splitting and solve the diff.
nodes = [n for n in char_nodes[tree_lo:tree_hi] if n is not None]
form = text[text_lo:text_hi]
self.solve_diff(nodes, form.strip())
def solve_diff(self, nodes, form):
"""Fix a given (minimal) tokens-vs-text inconsistency."""
nodes_str = ' '.join([n.form for n in nodes]) # just for debugging
node = nodes[0]
# First, solve the cases when the text contains a space.
if ' ' in form:
if len(nodes) == 1 and node.form == form.replace(' ', ''):
if self.allow_space(form):
self.store_orig_form(node, form)
node.form = form
elif self.allow_goeswith:
forms = form.split()
node.form = forms[0]
for split_form in reversed(forms[1:]):
new = node.create_child(form=split_form, deprel='goeswith', upos=node.upos)
new.shift_after_node(node)
else:
logging.warning('Unable to solve 1:m diff:\n%s -> %s', nodes_str, form)
else:
logging.warning('Unable to solve n:m diff:\n%s -> %s', nodes_str, form)
# Second, solve the cases when multiple nodes match one form (without any spaces).
elif len(nodes) > 1:
# If the match is exact, we can choose between MWT and SpaceAfter solutions.
if not self.prefer_mwt and ''.join([n.form for n in nodes]) == form:
pass # SpaceAfter=No will be added later on.
# If one of the nodes is already a MWT, we cannot have nested MWTs.
# TODO: enlarge the MWT instead of failing.
elif any(isinstance(n, MWT) for n in nodes):
logging.warning('Unable to solve partial-MWT diff:\n%s -> %s', nodes_str, form)
# MWT with too many words are suspicious.
elif len(nodes) > self.max_mwt_length:
logging.warning('Not creating too long (%d>%d) MWT:\n%s -> %s',
len(nodes), self.max_mwt_length, nodes_str, form)
# Otherwise, create a new MWT.
else:
node.root.create_multiword_token(nodes, form)
# Third, solve the 1-1 cases.
else:
self.store_orig_form(node, form)
node.form = form
def _nodes_to_chars(nodes):
chars, char_nodes = [], []
for node in nodes:
form = node.form
if node.misc['SpaceAfter'] != 'No' and node != nodes[-1]:
form += ' '
chars.extend(form)
char_nodes.append(node)
char_nodes.extend([None] * (len(form) - 1))
return ''.join(chars), char_nodes
def _log_diffs(diffs, tree_chars, text, msg):
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.warning('=== After %s:', msg)
for diff in diffs:
logging.warning(_diff2str(diff, tree_chars, text))
def _diff2str(diff, tree, text):
old = '|' + ''.join(tree[diff[1]:diff[2]]) + '|'
new = '|' + ''.join(text[diff[3]:diff[4]]) + '|'
if diff[0] == 'equal':
return '{:7} {!s:>50}'.format(diff[0], old)
return '{:7} {!s:>50} --> {!s}'.format(diff[0], old, new)
| gpl-3.0 |
tragiclifestories/django | django/contrib/admin/templatetags/admin_modify.py | 342 | 2505 | from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
"""
Creates a list of prepopulated_fields that should render Javascript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
ctx = {
'opts': opts,
'show_delete_link': (
not is_popup and context['has_delete_permission'] and
change and context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (
context['has_add_permission'] and not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': not is_popup and context['has_change_permission'] and show_save_and_continue,
'is_popup': is_popup,
'show_save': show_save,
'preserved_filters': context.get('preserved_filters'),
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
@register.filter
def cell_count(inline_admin_form):
"""Returns the number of cells used in a tabular inline"""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Loop through all the fields (one per cell)
for line in fieldset:
for field in line:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
| bsd-3-clause |
wkpark/zinnia | python/test.py | 12 | 1187 | #!/usr/bin/python
import zinnia
input = "(character (width 1000)(height 1000)(strokes ((243 273)(393 450))((700 253)(343 486)(280 716)(393 866)(710 880))))";
try:
s = zinnia.Character()
r = zinnia.Recognizer()
r.open("/usr/local/lib/zinnia/model/tomoe/handwriting-ja.model")
if (not s.parse(input)):
print s.what()
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
s.clear();
s.set_width(300)
s.set_height(300)
s.add(0, 51, 29)
s.add(0, 117, 41)
s.add(1, 99, 65)
s.add(1, 219, 77)
s.add(2, 27, 131)
s.add(2, 261, 131)
s.add(3, 129, 17)
s.add(3, 57, 203)
s.add(4, 111, 71)
s.add(4, 219, 173)
s.add(5, 81, 161)
s.add(5, 93, 281)
s.add(6, 99, 167)
s.add(6, 207, 167)
s.add(6, 189, 245)
s.add(7, 99, 227)
s.add(7, 189, 227)
s.add(8, 111, 257)
s.add(8, 189, 245)
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
except RuntimeError, e:
print "RuntimeError: ", e,
| bsd-3-clause |
storiesofsolidarity/story-database | stories/admin.py | 1 | 1393 | from django.contrib import admin
from models import Location, Story
from people.models import Author
class LocationAdmin(admin.ModelAdmin):
list_display = ('zipcode', 'city_fmt', 'county_fmt', 'state_fmt', 'story_count')
list_filter = ('state',)
search_fields = ('zipcode', 'city', 'county')
admin.site.register(Location, LocationAdmin)
class EmployerFilter(admin.SimpleListFilter):
title = 'author employer'
parameter_name = 'employer'
def lookups(self, request, model_admin):
employer_set = set()
for a in Author.objects.all():
if a.employer:
employer_set.add(a.employer.split(' ', 1)[0])
return [(str(c), str(c)) for c in employer_set if c]
def queryset(self, request, queryset):
if self.value() or self.value() == 'None':
return queryset.filter(author__employer__startswith=self.value())
else:
return queryset
class StoryAdmin(admin.ModelAdmin):
list_display = ('excerpt', 'author_display', 'employer', 'anonymous', 'created_at')
list_filter = (EmployerFilter, 'location__state', 'truncated')
date_hierarchy = 'created_at'
readonly_fields = ('truncated',)
raw_id_fields = ('author', 'location')
search_fields = ('location__city', 'author__user__first_name', 'author__user__last_name', 'content')
admin.site.register(Story, StoryAdmin)
| agpl-3.0 |
Racing1/android_kernel_htc_ruby_aosp | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
peppelinux/inventario_verdebinario | museo/models.py | 1 | 4183 | from django.db import models
from photologue.models import ImageModel
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
class Produttore(ImageModel):
id_tabella = models.AutoField(primary_key=True)
nome = models.CharField(max_length=135, blank=True)
nome_abbreviato = models.CharField(max_length=135, blank=True)
#slug = models.SlugField(unique=True, help_text=('"slug": un identificatore automatico e univoco'))
descrizione = models.TextField(max_length=1024, blank=True)
data_nascita = models.DateField(null=True, blank=True)
data_chiusura = models.DateField(null=True, blank=True)
#immagine_logo = models.ImageField(upload_to="LoghiProduttori", blank=True)
url = models.CharField(max_length=256, blank=True)
def save(self, *args, **kwargs):
if self.nome_abbreviato == None or self.nome_abbreviato.split() == []:
self.nome_abbreviato = self.nome.upper()
super(self.__class__, self).save(*args, **kwargs) # Call the "real" save() method.
class Meta:
ordering = ['nome']
db_table = 'produttore'
verbose_name_plural = "Produttore"
# def get_absolute_url(self):
# return '%s' % (self.url)
def __str__(self):
return '%s' % (self.nome_abbreviato)
class SchedaTecnica(models.Model):
id_tabella = models.AutoField(primary_key=True)
modello = models.CharField(max_length=135, blank=True)
produttore = models.ForeignKey(Produttore, null=True, blank=True, on_delete=models.SET_NULL)
paese_di_origine = models.CharField(max_length=135, blank=True)
anno = models.CharField(max_length=135, blank=True)
tastiera = models.CharField(max_length=135, blank=True)
cpu = models.CharField(max_length=135, blank=True)
velocita = models.CharField(max_length=135, blank=True)
memoria_volatile = models.CharField(max_length=135, blank=True)
memoria_di_massa = models.CharField(max_length=135, blank=True)
modalita_grafica = models.CharField(max_length=135, blank=True)
audio = models.CharField(max_length=135, blank=True)
dispositivi_media = models.CharField(max_length=135, blank=True)
alimentazione = models.CharField(max_length=135, blank=True)
prezzo = models.CharField(max_length=135, blank=True)
descrizione = models.TextField(max_length=1024, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
class Meta:
db_table = 'scheda_tecnica'
verbose_name_plural = "Scheda Tecnica"
class FotoHardwareMuseo(ImageModel):
id_tabella = models.AutoField(primary_key=True)
#immagine = models.ImageField(upload_to="FotoHardwareMuseo/%d.%m.%Y", blank=True)
etichetta_verde = models.CharField(max_length=135, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
seriale = models.CharField(max_length=384, blank=True)
didascalia = models.TextField(max_length=328, blank=True)
scheda_tecnica = models.ForeignKey(SchedaTecnica, null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'foto_hardware_museo'
verbose_name_plural = "Foto Hardware Museo"
def __str__(self):
return '%s %s' % (self.seriale, self.scheda_tecnica)
def get_absolute_url(self):
#return '/media/foto/FotoHardwareMuseo/' + self.data_inserimento.strftime('%d.%m.%Y') + '/' + self.image.name
return '/media/%s' % self.image.name
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
| gpl-3.0 |
scholarly/pynacl | tests/test_encoding.py | 7 | 2670 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import pytest
import nacl.encoding
import nacl.secret
KEY = b"1" * nacl.secret.SecretBox.KEY_SIZE
NONCE = b"1" * nacl.secret.SecretBox.NONCE_SIZE
TEXT = b"The quick brown fox jumps over the lazy dog"
VECTORS = [
# Encoder, Ciphertext
(
nacl.encoding.RawEncoder,
(b"111111111111111111111111\xfcU\xe2\x9f\xe6E\x92\xd7\x0eFM=x\x83\x8fj"
b"} v\xd4\xf0\x1a1\xc0\x88Uk\x12\x02\x1cd\xfaOH\x13\xdc\x0e\x0e\xd7A"
b"\x07\x0b.\x9f\x01\xbf\xe4\xd0s\xf1P\xd3\x0e\xaa\x9d\xb3\xf7\\\x0f"),
),
(
nacl.encoding.HexEncoder,
(b"313131313131313131313131313131313131313131313131fc55e29fe64592d70e4"
b"64d3d78838f6a7d2076d4f01a31c088556b12021c64fa4f4813dc0e0ed741070b2e"
b"9f01bfe4d073f150d30eaa9db3f75c0f"),
),
(
nacl.encoding.Base16Encoder,
(b"313131313131313131313131313131313131313131313131FC55E29FE64592D70E4"
b"64D3D78838F6A7D2076D4F01A31C088556B12021C64FA4F4813DC0E0ED741070B2E"
b"9F01BFE4D073F150D30EAA9DB3F75C0F"),
),
(
nacl.encoding.Base32Encoder,
(b"GEYTCMJRGEYTCMJRGEYTCMJRGEYTCMJRGEYTCMP4KXRJ7ZSFSLLQ4RSNHV4IHD3KPUQ"
b"HNVHQDIY4BCCVNMJAEHDE7JHUQE64BYHNOQIHBMXJ6AN74TIHH4KQ2MHKVHNT65OA6"
b"==="),
),
(
nacl.encoding.Base64Encoder,
(b"MTExMTExMTExMTExMTExMTExMTExMTEx/FXin+ZFktcORk09eIOPan0gdtTwGjHAiFV"
b"rEgIcZPpPSBPcDg7XQQcLLp8Bv+TQc/FQ0w6qnbP3XA8="),
),
(
nacl.encoding.URLSafeBase64Encoder,
(b"MTExMTExMTExMTExMTExMTExMTExMTEx_FXin-ZFktcORk09eIOPan0gdtTwGjHAiFV"
b"rEgIcZPpPSBPcDg7XQQcLLp8Bv-TQc_FQ0w6qnbP3XA8="),
),
]
@pytest.mark.parametrize(("encoder", "ciphertext"), VECTORS)
def test_encoders(encoder, ciphertext):
box = nacl.secret.SecretBox(KEY)
test_ciphertext = box.encrypt(TEXT, NONCE, encoder=encoder)
assert test_ciphertext == ciphertext
test_plaintext = box.decrypt(test_ciphertext, encoder=encoder)
assert test_plaintext == TEXT
| apache-2.0 |
hsum/sqlalchemy | examples/vertical/__init__.py | 30 | 1043 | """
Illustrates "vertical table" mappings.
A "vertical table" refers to a technique where individual attributes
of an object are stored as distinct rows in a table. The "vertical
table" technique is used to persist objects which can have a varied
set of attributes, at the expense of simple query control and brevity.
It is commonly found in content/document management systems in order
to represent user-created structures flexibly.
Two variants on the approach are given. In the second, each row
references a "datatype" which contains information about the type of
information stored in the attribute, such as integer, string, or date.
Example::
shrew = Animal(u'shrew')
shrew[u'cuteness'] = 5
shrew[u'weasel-like'] = False
shrew[u'poisonous'] = True
session.add(shrew)
session.flush()
q = (session.query(Animal).
filter(Animal.facts.any(
and_(AnimalFact.key == u'weasel-like',
AnimalFact.value == True))))
print 'weasel-like animals', q.all()
.. autosource::
""" | mit |
jbreitbart/autopin-plus | vendor/fast-lib/vendor/mosquitto-1.3.5/test/broker/05-clean-session-qos1.py | 18 | 1845 | #!/usr/bin/env python
# Test whether a clean session client has a QoS 1 message queued for it.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
mid = 109
keepalive = 60
connect_packet = mosq_test.gen_connect("clean-qos2-test", keepalive=keepalive, clean_session=False)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/clean_session/test", 1)
suback_packet = mosq_test.gen_suback(mid, 1)
mid = 1
publish_packet = mosq_test.gen_publish("qos1/clean_session/test", qos=1, mid=mid, payload="clean-session-message")
puback_packet = mosq_test.gen_puback(mid)
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
time.sleep(0.5)
sock = mosq_test.do_client_connect(connect_packet, connack_packet)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
sock.send(disconnect_packet)
sock.close()
pub = subprocess.Popen(['./05-clean-session-qos1-helper.py'])
pub.wait()
# Now reconnect and expect a publish message.
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=30)
if mosq_test.expect_packet(sock, "publish", publish_packet):
sock.send(puback_packet)
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| gpl-3.0 |
tunneln/CarnotKE | jyhton/lib-python/2.7/bdb.py | 144 | 21714 | """Debugger basics"""
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
"""Exception to give up completely"""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
self.frame_returning = None
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
try:
self.frame_returning = frame
self.user_return(frame, arg)
finally:
self.frame_returning = None
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not filename in self.breaks:
return False
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if not lineno in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = 0
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame): #the name "until" is borrowed from gdb
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
self._set_stopinfo(frame, frame, frame.f_lineno+1)
def set_step(self):
"""Stop after one line of code."""
# Issue #13183: pdb skips frames after hitting a breakpoint and running
# step commands.
# Restore the trace function in the caller (that may not have been set
# for performance reasons) when returning from the current frame.
if self.frame_returning:
caller_frame = self.frame_returning.f_back
if caller_frame and not caller_frame.f_trace:
caller_frame.f_trace = self.trace_dispatch
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message is something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno, frame.f_globals)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if (file, line) in self.bplist:
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self, out=None):
if out is None:
out = sys.stdout
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print >>out, '\tstop only if %s' % (self.cond,)
if self.ignore:
print >>out, '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print >>out, ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
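# Illustrative caller-side use of the return value (a sketch, not from the original):
#   (bp, delete_ok) = effective(file, line, frame)
#   if bp and bp.temporary and delete_ok:
#       bp.deleteMe()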
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
| apache-2.0 |
XtheOne/Inverter-Data-Logger | InverterLib.py | 1 | 3301 | import socket
import struct
import os
import binascii
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('cp437')
def getNetworkIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
def createV4RequestFrame(logger_sn):
"""Create request frame for inverter logger.
The request string is build from several parts. The first part is a
fixed 4 char string; the second part is the reversed hex notation of
the s/n twice; then again a fixed string of two chars; a checksum of
the double s/n with an offset; and finally a fixed ending char.
Args:
logger_sn (int): Serial number of the inverter
Returns:
str: Information request string for inverter
"""
#frame = (headCode) + (dataFieldLength) + (contrlCode) + (sn) + (sn) + (command) + (checksum) + (endCode)
frame_hdr = binascii.unhexlify('680241b1') #from SolarMan / new Omnik app
command = binascii.unhexlify('0100')
defchk = binascii.unhexlify('87')
endCode = binascii.unhexlify('16')
tar = bytearray.fromhex(hex(logger_sn)[8:10] + hex(logger_sn)[6:8] + hex(logger_sn)[4:6] + hex(logger_sn)[2:4])
frame = bytearray(frame_hdr + tar + tar + command + defchk + endCode)
checksum = 0
frame_bytes = bytearray(frame)
for i in range(1, len(frame_bytes) - 2, 1):
checksum += frame_bytes[i] & 255
frame_bytes[len(frame_bytes) - 2] = int((checksum & 255))
return bytearray(frame_bytes)
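# Illustrative use (the serial number below is invented; a real logger s/n and an
# already-connected socket are assumed):
#   frame = createV4RequestFrame(602123456)
#   sock.send(frame)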
def expand_path(path):
"""
Expand relative path to absolute path.
Args:
path: file path
Returns: absolute path to file
"""
if os.path.isabs(path):
return path
else:
return os.path.dirname(os.path.abspath(__file__)) + "/" + path
def getLoggers():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((getNetworkIp(), 48899))
# Set a timeout so the socket does not block indefinitely when trying to receive data.
sock.settimeout(3)
# Set the time-to-live for messages to 1 so they do not go past the local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
SendData = "WIFIKIT-214028-READ" # Lotto/TM = "AT+YZAPP=214028,READ"
gateways = ''
try:
# Send data to the broadcast address
sent = sock.sendto(SendData, ('<broadcast>', 48899))
# Look for responses from all recipients
while True:
try:
data, server = sock.recvfrom(1024)
except socket.timeout:
break
else:
if (data == SendData): continue #skip sent data
a = data.split(',')
wifi_ip, wifi_mac, wifi_sn = a[0],a[1],a[2]
if (len(gateways)>1):
gateways = gateways+','
gateways = gateways+wifi_ip+','+wifi_sn
finally:
sock.close()
return gateways
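# Illustrative parsing of the result (addresses and serials below are invented):
# getLoggers() returns a flat comma-separated string such as
# '192.168.1.55,602123456,192.168.1.60,602654321', so a caller might do:
#   parts = getLoggers().split(',')
#   loggers = zip(parts[0::2], parts[1::2])   # [(ip, serial), ...]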
| gpl-3.0 |
sigrokproject/libsigrokdecode | decoders/swd/__init__.py | 6 | 1212 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Angus Gratton <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This PD decodes the ARM SWD (version 1) protocol, as described in the
"ARM Debug Interface v5.2" Architecture Specification.
Not supported:
* Turnaround periods other than the default 1, as set in DLCR.TURNROUND
(should be trivial to add)
* SWD protocol version 2 (multi-drop support, etc.)
Details:
http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ihi0031c/index.html
(Registration required)
'''
from .pd import Decoder
| gpl-3.0 |
dstanek/keystone | keystone/common/dependency.py | 10 | 7661 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module provides support for dependency injection.
Providers are registered via the ``@provider()`` decorator, and dependencies on
them are registered with ``@requires()``. Providers are available to their
consumers via an attribute. See the documentation for the individual functions
for more detail.
See also:
https://en.wikipedia.org/wiki/Dependency_injection
"""
import traceback
from keystone.i18n import _
_REGISTRY = {}
_future_dependencies = {}
_factories = {}
def _set_provider(name, provider):
_original_provider, where_registered = _REGISTRY.get(name, (None, None))
if where_registered:
raise Exception('%s already has a registered provider, at\n%s' %
(name, ''.join(where_registered)))
_REGISTRY[name] = (provider, traceback.format_stack())
GET_REQUIRED = object()
GET_OPTIONAL = object()
def get_provider(name, optional=GET_REQUIRED):
if optional is GET_REQUIRED:
return _REGISTRY[name][0]
return _REGISTRY.get(name, (None, None))[0]
class UnresolvableDependencyException(Exception):
"""Raised when a required dependency is not resolvable.
See ``resolve_future_dependencies()`` for more details.
"""
def __init__(self, name, targets):
msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
'name': name, 'targets': targets}
super(UnresolvableDependencyException, self).__init__(msg)
def provider(name):
"""A class decorator used to register providers.
When ``@provider()`` is used to decorate a class, members of that class
will register themselves as providers for the named dependency. As an
example, In the code fragment::
@dependency.provider('foo_api')
class Foo:
def __init__(self):
...
...
foo = Foo()
    The object ``foo`` will be registered as a provider for ``foo_api``. Only
    one such instance may be created: registering a second provider for the
    same name raises an exception (see ``_set_provider`` above).
"""
def wrapper(cls):
def wrapped(init):
def __wrapped_init__(self, *args, **kwargs):
"""Initialize the wrapped object and add it to the registry."""
init(self, *args, **kwargs)
_set_provider(name, self)
resolve_future_dependencies(__provider_name=name)
return __wrapped_init__
cls.__init__ = wrapped(cls.__init__)
_factories[name] = cls
return cls
return wrapper
def _process_dependencies(obj):
# Any dependencies that can be resolved immediately are resolved.
# Dependencies that cannot be resolved immediately are stored for
# resolution in resolve_future_dependencies.
def process(obj, attr_name, unresolved_in_out):
for dependency in getattr(obj, attr_name, []):
if dependency not in _REGISTRY:
# We don't know about this dependency, so save it for later.
unresolved_in_out.setdefault(dependency, []).append(obj)
continue
setattr(obj, dependency, get_provider(dependency))
process(obj, '_dependencies', _future_dependencies)
def requires(*dependencies):
"""A class decorator used to inject providers into consumers.
The required providers will be made available to instances of the decorated
class via an attribute with the same name as the provider. For example, in
the code fragment::
@dependency.requires('foo_api', 'bar_api')
class FooBarClient:
def __init__(self):
...
...
client = FooBarClient()
The object ``client`` will have attributes named ``foo_api`` and
``bar_api``, which are instances of the named providers.
Objects must not rely on the existence of these attributes until after
``resolve_future_dependencies()`` has been called; they may not exist
beforehand.
Dependencies registered via ``@required()`` must have providers; if not,
an ``UnresolvableDependencyException`` will be raised when
``resolve_future_dependencies()`` is called.
"""
def wrapper(self, *args, **kwargs):
"""Inject each dependency from the registry."""
self.__wrapped_init__(*args, **kwargs)
_process_dependencies(self)
def wrapped(cls):
"""Note the required dependencies on the object for later injection.
The dependencies of the parent class are combined with that of the
child class to create a new set of dependencies.
"""
existing_dependencies = getattr(cls, '_dependencies', set())
cls._dependencies = existing_dependencies.union(dependencies)
if not hasattr(cls, '__wrapped_init__'):
cls.__wrapped_init__ = cls.__init__
cls.__init__ = wrapper
return cls
return wrapped
def resolve_future_dependencies(__provider_name=None):
"""Forces injection of all dependencies.
Before this function is called, circular dependencies may not have been
injected. This function should be called only once, after all global
providers are registered. If an object needs to be created after this
call, it must not have circular dependencies.
If any required dependencies are unresolvable, this function will raise an
``UnresolvableDependencyException``.
Outside of this module, this function should be called with no arguments;
the optional argument, ``__provider_name`` is used internally, and should
be treated as an implementation detail.
"""
new_providers = dict()
if __provider_name:
# A provider was registered, so take care of any objects depending on
# it.
targets = _future_dependencies.pop(__provider_name, [])
for target in targets:
setattr(target, __provider_name, get_provider(__provider_name))
return
# Resolve future dependencies, raises UnresolvableDependencyException if
# there's no provider registered.
try:
for dependency, targets in _future_dependencies.copy().items():
if dependency not in _REGISTRY:
# a Class was registered that could fulfill the dependency, but
# it has not yet been initialized.
factory = _factories.get(dependency)
if factory:
provider = factory()
new_providers[dependency] = provider
else:
raise UnresolvableDependencyException(dependency, targets)
for target in targets:
setattr(target, dependency, get_provider(dependency))
finally:
_future_dependencies.clear()
return new_providers
def reset():
"""Reset the registry of providers.
This is useful for unit testing to ensure that tests don't use providers
from previous tests.
"""
_REGISTRY.clear()
_future_dependencies.clear()
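# Minimal end-to-end sketch (hypothetical classes, for illustration only):
#
#   @provider('foo_api')
#   class Foo(object):
#       pass
#
#   @requires('foo_api')
#   class Consumer(object):
#       pass
#
#   Foo()                             # instantiating registers the provider
#   resolve_future_dependencies()     # injects into consumers created earlier
#   Consumer().foo_api                # -> the Foo() instance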
| apache-2.0 |
ioos/system-test | pelican-plugins/asciidoc_reader/test_asciidoc_reader.py | 34 | 2460 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
from pelican.readers import Readers
from pelican.tests.support import unittest, get_settings
from .asciidoc_reader import asciidoc_enabled
CUR_DIR = os.path.dirname(__file__)
CONTENT_PATH = os.path.join(CUR_DIR, 'test_data')
@unittest.skipUnless(asciidoc_enabled, "asciidoc isn't installed")
class AsciiDocReaderTest(unittest.TestCase):
def read_file(self, path, **kwargs):
# Isolate from future API changes to readers.read_file
r = Readers(settings=get_settings(**kwargs))
return r.read_file(base_path=CONTENT_PATH, path=path)
def test_article_with_asc_extension(self):
# Ensure the asc extension is being processed by the correct reader
page = self.read_file(
path='article_with_asc_extension.asc')
expected = ('<div class="sect1">\n'
'<h2 id="_used_for_pelican_test">'
'Used for pelican test</h2>\n'
'<div class="sectionbody">\n'
'<div class="paragraph">'
'<p>The quick brown fox jumped over '
                    'the lazy dog’s back.</p>'
'</div>\n</div>\n</div>\n')
self.assertEqual(page.content, expected)
expected = {
'category': 'Blog',
'author': 'Author O. Article',
'title': 'Test AsciiDoc File Header',
'date': datetime.datetime(2011, 9, 15, 9, 5),
'tags': ['Linux', 'Python', 'Pelican'],
}
for key, value in expected.items():
self.assertEqual(value, page.metadata[key], key)
def test_article_with_asc_options(self):
# test to ensure the ASCIIDOC_OPTIONS is being used
page = self.read_file(path='article_with_asc_options.asc',
ASCIIDOC_OPTIONS=["-a revision=1.0.42"])
expected = ('<div class="sect1">\n'
'<h2 id="_used_for_pelican_test">'
'Used for pelican test</h2>\n'
'<div class="sectionbody">\n'
'<div class="paragraph">'
'<p>version 1.0.42</p></div>\n'
'<div class="paragraph">'
'<p>The quick brown fox jumped over '
                    'the lazy dog’s back.</p>'
'</div>\n</div>\n</div>\n')
self.assertEqual(page.content, expected) | unlicense |
mvaled/sentry | tests/sentry/deletions/test_tagkey.py | 1 | 3690 | from __future__ import absolute_import
from sentry import tagstore
from sentry.tagstore.models import EventTag
from sentry.models import ScheduledDeletion
from sentry.tasks.deletion import run_deletion
from sentry.testutils import TestCase
class DeleteTagKeyTest(TestCase):
def test_simple(self):
team = self.create_team(name="test", slug="test")
project = self.create_project(teams=[team], name="test1", slug="test1")
group = self.create_group(project=project)
key = "foo"
value = "bar"
tk = tagstore.create_tag_key(
key=key, project_id=project.id, environment_id=self.environment.id
)
tv = tagstore.create_tag_value(
key=key, value=value, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_key(
key=key, group_id=group.id, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_value(
key=key,
value=value,
group_id=group.id,
project_id=project.id,
environment_id=self.environment.id,
)
tagstore.create_event_tags(
group_id=group.id,
project_id=project.id,
event_id=1,
environment_id=self.environment.id,
tags=[(tk.key, tv.value)],
)
project2 = self.create_project(teams=[team], name="test2")
env2 = self.create_environment(project=project2)
group2 = self.create_group(project=project2)
tk2 = tagstore.create_tag_key(project2.id, env2.id, key)
tv2 = tagstore.create_tag_value(
key=key, value=value, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_key(
key=key, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_value(
key=key, value=value, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_event_tags(
group_id=group2.id,
project_id=project2.id,
environment_id=env2.id,
event_id=1,
tags=[(tk2.key, tv2.value)],
)
deletion = ScheduledDeletion.schedule(tk, days=0)
deletion.update(in_progress=True)
with self.tasks():
run_deletion(deletion.id)
try:
tagstore.get_group_tag_value(
group.project_id, group.id, self.environment.id, key, value
)
assert False # verify exception thrown
except tagstore.GroupTagValueNotFound:
pass
try:
tagstore.get_group_tag_key(group.project_id, group.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.GroupTagKeyNotFound:
pass
try:
tagstore.get_tag_value(project.id, self.environment.id, key, value)
assert False # verify exception thrown
except tagstore.TagValueNotFound:
pass
try:
tagstore.get_tag_key(project.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.TagKeyNotFound:
pass
assert tagstore.get_tag_key(project2.id, env2.id, key) is not None
assert tagstore.get_group_tag_key(group2.project_id, group2.id, env2.id, key) is not None
assert (
tagstore.get_group_tag_value(group2.project_id, group2.id, env2.id, key, value)
is not None
)
assert EventTag.objects.filter(key_id=tk2.id).exists()
| bsd-3-clause |
RedhawkSDR/integration-gnuhawk | gnuradio/docs/doxygen/doxyxml/doxyindex.py | 16 | 8404 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files we want the contents to be accessible directly
# from the parent rather than having to go through the file
# object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
def set_parameters(self, data):
vs = [ddc.value for ddc in data.detaileddescription.content_]
pls = []
for v in vs:
if hasattr(v, 'parameterlist'):
pls += v.parameterlist
pis = []
for pl in pls:
pis += pl.parameteritem
dpis = []
for pi in pis:
dpi = DoxyParameterItem(pi)
dpi._parse()
dpis.append(dpi)
self._data['params'] = dpis
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self.set_parameters(self._parse_data)
if not self._data['params']:
# If the params weren't set by a comment then just grab the names.
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
@property
def description(self):
descriptions = []
if self.brief_description:
descriptions.append(self.brief_description)
if self.detailed_description:
descriptions.append(self.detailed_description)
return '\n\n'.join(descriptions)
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
name = property(lambda self: self.data()['declname'])
class DoxyParameterItem(DoxyMember):
"""A different representation of a parameter in Doxygen."""
def _parse(self):
if self._parsed:
return
super(DoxyParameterItem, self)._parse()
names = []
for nl in self._parse_data.parameternamelist:
for pn in nl.parametername:
names.append(description(pn))
# Just take first name
self._data['name'] = names[0]
# Get description
pd = description(self._parse_data.get_parameterdescription())
self._data['description'] = pd
description = property(lambda self: self.data()['description'])
name = property(lambda self: self.data()['name'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
self.set_parameters(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
| gpl-3.0 |
alexandrucoman/vbox-nova-driver | nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py | 33 | 15330 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute.contrib import server_groups
from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v21
from nova.api.openstack import extensions
from nova import context
import nova.db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
FAKE_UUID3 = 'b8713410-9ba3-e913-901b-13410ca90121'
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_resp_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
return sgroup
def server_group_db(sg):
attrs = sg.copy()
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = 'user_id'
if 'project_id' not in attrs:
attrs['project_id'] = 'project_id'
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies):
sgroup = server_group_template()
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal([policy])
def _create_instance(self, context):
instance = objects.Instance(context=context, image_ref=1, node='node1',
reservation_id='a', host='host1', project_id='fake',
vm_state='fake', system_metadata={'key': 'value'})
instance.create()
return instance
def _create_instance_group(self, context, members):
ig = objects.InstanceGroup(context=context, name='fake_name',
user_id='fake_user', project_id='fake',
members=members)
ig.create()
return ig.uuid
def _create_groups_and_instances(self, ctx):
instances = [self._create_instance(ctx), self._create_instance(ctx)]
members = [instance.uuid for instance in instances]
ig_uuid = self._create_instance_group(ctx, members)
return (ig_uuid, instances, members)
def test_display_members(self):
ctx = context.RequestContext('fake_user', 'fake')
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(2, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_active_members_only(self):
ctx = context.RequestContext('fake_user', 'fake')
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
instances[1].destroy()
# check that the instance does not exist
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(1, len(result_members))
self.assertIn(instances[0].uuid, result_members)
def test_create_server_group_with_non_alphanumeric_in_name(self):
# The fix for bug #1434335 expanded the allowable character set
# for server group names to include non-alphanumeric characters
# if they are printable.
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
def test_create_server_group_with_illegal_name(self):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_list_server_group_by_tenant(self):
groups = []
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=str(891),
name=names[1],
policies=policies,
members=members,
metadata=metadata)
groups = [sg1, sg2]
expected = {'server_groups': groups}
def return_server_groups(context, project_id):
return [server_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
return_server_groups)
res_dict = self.controller.index(self.req)
self.assertEqual(res_dict, expected)
def test_list_server_group_all(self):
all_groups = []
tenant_groups = []
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
policies=[],
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=str(891),
name=names[1],
policies=policies,
members=members,
metadata={})
tenant_groups = [sg2]
all_groups = [sg1, sg2]
all = {'server_groups': all_groups}
tenant_specific = {'server_groups': tenant_groups}
def return_all_server_groups(context):
return [server_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'instance_group_get_all',
return_all_server_groups)
def return_tenant_server_groups(context, project_id):
return [server_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
return_tenant_server_groups)
path = '/os-server-groups?all_projects=True'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, all)
req = fakes.HTTPRequest.blank(path)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, tenant_specific)
def test_delete_server_group_by_id(self):
sg = server_group_template(id='123')
self.called = False
def server_group_delete(context, id):
self.called = True
def return_server_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return server_group_db(sg)
self.stubs.Set(nova.db, 'instance_group_delete',
server_group_delete)
self.stubs.Set(nova.db, 'instance_group_get',
return_server_group)
resp = self.controller.delete(self.req, '123')
self.assertTrue(self.called)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, sg_v21.ServerGroupController):
status_int = self.controller.delete.wsgi_code
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, 'invalid')
class ServerGroupTestV2(ServerGroupTestV21):
validation_error = webob.exc.HTTPBadRequest
def _setup_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {}
self.controller = server_groups.ServerGroupController(ext_mgr)
| apache-2.0 |
cpcloud/PyTables | contrib/make_hdf.py | 14 | 10754 | #!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
    #returns 'str' for strings, 'notstr' for non-iterable scalars, 0 for other iterables
try:
iter(item)
#could be a string
try:
item[:0]+'' #check for string
return 'str'
except:
return 0
except:
return 'notstr'
def is_dict(item):
try:
item.iteritems()
return 1
except:
return 0
def make_col(row_type, row_name, row_item, str_len):
    '''for strings it will always make at least 80 char or twice max char size'''
set_len=80
if str_len:
if 2*str_len>set_len:
set_len=2*str_len
row_type[row_name]=tables.Col("CharType", set_len)
else:
type_matrix={
int: tables.Col("Int32", 1),
float: tables.Col("Float32", 4), #Col("Int16", 1)
}
row_type[row_name]=type_matrix[type(row_item)]
def make_row(data):
row_type={}
scalar_type=is_scalar(data)
if scalar_type:
if scalar_type=='str':
make_col(row_type, 'scalar', data, len(data))
else:
make_col(row_type, 'scalar', data, 0)
else: #it is a list-like
the_type=is_scalar(data[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col', data[0], the_max)
elif the_type:
make_col(row_type, 'col', data[0], 0)
else: #list within the list, make many columns
make_col(row_type, 'col_depth', 0, 0)
count=0
for col in data:
the_type=is_scalar(col[0])
if the_type=='str':
#get max length
the_max=0
                    for i in col:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col_'+str(count), col[0], the_max)
elif the_type:
make_col(row_type, 'col_'+str(count), col[0], 0)
else:
raise ValueError('too many nested levels of lists')
count+=1
return row_type
def add_table(fileh, group_obj, data, table_name):
#figure out if it is a list of lists or a single list
#get types of columns
row_type=make_row(data)
table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
row=table1.row
if is_scalar(data):
row['scalar']=data
row.append()
else:
if is_scalar(data[0]):
for i in data:
row['col']=i
row.append()
else:
count=0
for col in data:
row['col_depth']=len(col)
for the_row in col:
if is_scalar(the_row):
row['col_'+str(count)]=the_row
row.append()
else:
raise ValueError('too many levels of lists')
count+=1
table1.flush()
def add_cache(fileh, cache):
group_name='pytables_cache_v0';table_name='cache0'
root=fileh.root
group_obj=fileh.createGroup(root, group_name)
cache_str=cPickle.dumps(cache, 0)
cache_str=cache_str.replace('\n', chr(1))
cache_pieces=[]
while cache_str:
cache_part=cache_str[:8000];cache_str=cache_str[8000:]
if cache_part:
cache_pieces.append(cache_part)
row_type={}
row_type['col_0']=tables.Col("CharType", 8000)
#
    table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
for piece in cache_pieces:
print len(piece)
table_cache.row['col_0']=piece
table_cache.row.append()
table_cache.flush()
def save2(hdf_file, data):
fileh=tables.openFile(hdf_file, mode='w', title='logon history')
root=fileh.root;cache_root=cache={}
root_path=root._v_pathname;root=0
stack = [ (root_path, data, cache) ]
table_num=0
count=0
while stack:
(group_obj_path, data, cache)=stack.pop()
#data='wilma':{'mother':[22,23,24]}}
#grp_name wilma
for grp_name in data:
#print 'fileh=',fileh
count+=1
cache[grp_name]={}
new_group_obj=fileh.createGroup(group_obj_path, grp_name)
#print 'path=',new_group_obj._v_pathname
new_path=new_group_obj._v_pathname
#if dict, you have a bunch of groups
if is_dict(data[grp_name]):#{'mother':[22,23,24]}
stack.append((new_path, data[grp_name], cache[grp_name]))
#you have a table
else:
#data[grp_name]=[110,130,140],[1,2,3]
add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
table_num+=1
#fileh=tables.openFile(hdf_file,mode='a',title='logon history')
add_cache(fileh, cache_root)
fileh.close()
########################
class Hdf_dict(dict):
def __init__(self,hdf_file,hdf_dict={},stack=[]):
self.hdf_file=hdf_file
self.stack=stack
if stack:
self.hdf_dict=hdf_dict
else:
self.hdf_dict=self.get_cache()
self.cur_dict=self.hdf_dict
def get_cache(self):
fileh=tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
table=fileh.root.cache0
total=[]
print 'reading'
begin=time.time()
for i in table.iterrows():
total.append(i['col_0'])
total=''.join(total)
total=total.replace(chr(1), '\n')
print 'loaded cache len=', len(total), time.time()-begin
begin=time.time()
a=cPickle.loads(total)
print 'cache', time.time()-begin
return a
def has_key(self, k):
return k in self.cur_dict
def keys(self):
return self.cur_dict.keys()
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def items(self):
return list(self.iteritems())
def values(self):
return list(self.itervalues())
###########################################
def __len__(self):
return len(self.cur_dict)
def __getitem__(self, k):
if k in self.cur_dict:
#now check if k has any data
if self.cur_dict[k]:
new_stack=self.stack[:]
new_stack.append(k)
return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k], stack=new_stack)
else:
new_stack=self.stack[:]
new_stack.append(k)
fileh=tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
#cur_data=getattr(self.cur_group,k) #/wilma (Group) '' =getattr(/ (Group) 'logon history',wilma)
for table in fileh.root:
#return [ i['col_1'] for i in table.iterrows() ] #[9110,91]
#perhaps they stored a single item
try:
for item in table['scalar']:
return item
except:
#otherwise they stored a list of data
try:
return [ item for item in table['col']]
except:
cur_column=[]
total_columns=[]
col_num=0
cur_row=0
num_rows=0
for row in table:
if not num_rows:
num_rows=row['col_depth']
if cur_row==num_rows:
cur_row=num_rows=0
col_num+=1
total_columns.append(cur_column)
cur_column=[]
cur_column.append( row['col_'+str(col_num)])
cur_row+=1
total_columns.append(cur_column)
return total_columns
else:
raise KeyError(k)
    def iterkeys(self):
        # iterate the cached dict directly; self.iterkeys() here would recurse forever
        for key in self.cur_dict.iterkeys():
yield key
def __iter__(self):
return self.iterkeys()
def itervalues(self):
for k in self.iterkeys():
v=self.__getitem__(k)
yield v
def iteritems(self):
# yield children
for k in self.iterkeys():
v=self.__getitem__(k)
yield (k, v)
def __repr__(self):
return '{Hdf dict}'
def __str__(self):
return self.__repr__()
#####
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self, d):
for k, v in d.iteritems():
self.__setitem__(k, v)
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError("Hdf Dict is empty")
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __hash__(self):
raise TypeError("Hdf dict bjects are unhashable")
if __name__=='__main__':
def write_small(file=''):
data1={
'fred':['a', 'b', 'c'],
'barney':[[9110, 9130, 9140], [91, 92, 93]],
'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
}
print 'saving'
save2(file, data1)
print 'saved'
def read_small(file=''):
#a=make_hdf.Hdf_dict(file)
a=Hdf_dict(file)
print a['wilma']
b=a['wilma']
for i in b:
print i
print a.keys()
print 'has fred', bool('fred' in a)
print 'length a', len(a)
print 'get', a.get('fred'), a.get('not here')
print 'wilma keys', a['wilma'].keys()
print 'barney', a['barney']
print 'get items'
print a.items()
for i in a.iteritems():
print 'item', i
for i in a.itervalues():
print i
a=raw_input('enter y to write out test file to test.hdf')
if a.strip()=='y':
print 'writing'
write_small('test.hdf')
print 'reading'
read_small('test.hdf')
| bsd-3-clause |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Tools/scripts/win_add2path.py | 49 | 1618 | """Add Python to the search path on Windows
This is a simple script to add Python to the Windows search path. It
modifies the current user (HKCU) tree of the registry.
Copyright (c) 2008 by Christian Heimes <[email protected]>
Licensed to PSF under a Contributor Agreement.
"""
import sys
import site
import os
import winreg
HKCU = winreg.HKEY_CURRENT_USER
ENV = "Environment"
PATH = "PATH"
DEFAULT = "%PATH%"
def modify():
pythonpath = os.path.dirname(os.path.normpath(sys.executable))
scripts = os.path.join(pythonpath, "Scripts")
appdata = os.environ["APPDATA"]
if hasattr(site, "USER_SITE"):
userpath = site.USER_SITE.replace(appdata, "%APPDATA%")
userscripts = os.path.join(userpath, "Scripts")
else:
userscripts = None
with winreg.CreateKey(HKCU, ENV) as key:
try:
envpath = winreg.QueryValueEx(key, PATH)[0]
except WindowsError:
envpath = DEFAULT
paths = [envpath]
for path in (pythonpath, scripts, userscripts):
if path and path not in envpath and os.path.isdir(path):
paths.append(path)
envpath = os.pathsep.join(paths)
winreg.SetValueEx(key, PATH, 0, winreg.REG_EXPAND_SZ, envpath)
return paths, envpath
def main():
paths, envpath = modify()
if len(paths) > 1:
print("Path(s) added:")
print('\n'.join(paths[1:]))
else:
print("No path was added")
print("\nPATH is now:\n%s\n" % envpath)
print("Expanded:")
print(winreg.ExpandEnvironmentStrings(envpath))
if __name__ == '__main__':
main()
| apache-2.0 |
jiaojianbupt/tools | project_manager/alias.py | 1 | 1746 | # -*- coding: utf-8 -*-
"""
Created by jiaojian at 2018/6/29 16:30
"""
import os
import sys
import termios
from tools.utils.basic_printer import print_with_style, ConsoleColor
HOME = os.environ['HOME']
def get_input():
fd = sys.stdin.fileno()
old_tty_info = termios.tcgetattr(fd)
new_tty_info = old_tty_info[:]
new_tty_info[3] &= ~termios.ICANON
new_tty_info[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new_tty_info)
answer = os.read(fd, 1)
termios.tcsetattr(fd, termios.TCSANOW, old_tty_info)
return answer
def add_alias():
if sys.platform == 'darwin':
bash_profile_name = '.bash_profile'
else:
bash_profile_name = '.bashrc'
linux_bash_profile_path = os.path.join(HOME, bash_profile_name)
exec_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'main.py')
alias = 'alias updateall="python %s"' % exec_file_path
if os.path.exists(linux_bash_profile_path):
        with open(linux_bash_profile_path, 'r') as bashrc_file:
bash_profile = bashrc_file.read()
if bash_profile.find(alias) >= 0:
return
answer = ''
while not answer or answer not in {'y', 'n'}:
print_with_style('Add \'%s\' to your %s?(y/n)' % (alias, bash_profile_name), color=ConsoleColor.YELLOW)
answer = get_input()
if answer == 'n':
return
elif answer == 'y':
break
bash_profile = bash_profile + '\n' + alias
with open(linux_bash_profile_path, 'w') as bashrc_file:
bashrc_file.write(bash_profile)
print_with_style('Alias added.', color=ConsoleColor.YELLOW)
| gpl-3.0 |
75651/kbengine_cloud | kbe/res/scripts/common/Lib/test/test_json/test_indent.py | 103 | 1824 | import textwrap
from io import StringIO
from test.test_json import PyTest, CTest
class TestIndent:
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")
d1 = self.dumps(h)
d2 = self.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
d3 = self.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
d4 = self.dumps(h, indent=2, sort_keys=True)
d5 = self.dumps(h, indent='\t', sort_keys=True)
h1 = self.loads(d1)
h2 = self.loads(d2)
h3 = self.loads(d3)
self.assertEqual(h1, h)
self.assertEqual(h2, h)
self.assertEqual(h3, h)
self.assertEqual(d2, expect.expandtabs(2))
self.assertEqual(d3, expect)
self.assertEqual(d4, d2)
self.assertEqual(d5, d3)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = self.dumps(h, indent=indent)
self.assertEqual(d1, expected)
sio = StringIO()
self.json.dump(h, sio, indent=indent)
self.assertEqual(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
class TestPyIndent(TestIndent, PyTest): pass
class TestCIndent(TestIndent, CTest): pass
| lgpl-3.0 |
gs0510/coala-bears | bears/python/PyFlakesBear.py | 13 | 1050 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
@linter(executable='pyflakes',
use_stderr=True,
output_format='regex',
output_regex=r'.*:(?P<line>\d+):'
r'[(?P<column>\d+):|?]*(?P<severity>)\s(?P<message>.*)\n',
severity_map={
'': RESULT_SEVERITY.INFO
})
class PyFlakesBear:
"""
Checks Python files for errors using ``pyflakes``.
See https://github.com/PyCQA/pyflakes for more info.
"""
LANGUAGES = {'Python', 'Python 3'}
REQUIREMENTS = {PipRequirement('pyflakes', '1.4.0')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
ASCIINEMA_URL = 'https://asciinema.org/a/92503'
CAN_DETECT = {'Syntax', 'Unused Code', 'Undefined Element'}
@staticmethod
def create_arguments(filename, file, config_file):
return filename,
| agpl-3.0 |
amw2104/fireplace | setup.py | 1 | 1046 | #!/usr/bin/env python
import os.path
import fireplace
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), "README.md")).read()
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)"
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Games/Entertainment :: Simulation",
]
setup(
name="fireplace",
version=fireplace.__version__,
packages=find_packages(exclude="tests"),
package_data={"": ["CardDefs.xml"]},
include_package_data=True,
tests_require=["pytest"],
author=fireplace.__author__,
author_email=fireplace.__email__,
description="Pure-python Hearthstone re-implementation and simulator",
classifiers=CLASSIFIERS,
download_url="https://github.com/jleclanche/python-bna/tarball/master",
long_description=README,
license="AGPLv3",
url="https://github.com/jleclanche/fireplace",
)
| agpl-3.0 |
sjperkins/tensorflow | tensorflow/contrib/keras/python/keras/metrics.py | 8 | 3418 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in Keras metrics functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.keras.python.keras import backend as K
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.losses import binary_crossentropy
from tensorflow.contrib.keras.python.keras.losses import categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import cosine_proximity
from tensorflow.contrib.keras.python.keras.losses import hinge
from tensorflow.contrib.keras.python.keras.losses import kullback_leibler_divergence
from tensorflow.contrib.keras.python.keras.losses import logcosh
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_error
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_percentage_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.contrib.keras.python.keras.losses import poisson
from tensorflow.contrib.keras.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import squared_hinge
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
def binary_accuracy(y_true, y_pred):
return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
def categorical_accuracy(y_true, y_pred):
return K.cast(
K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
def sparse_categorical_accuracy(y_true, y_pred):
return K.cast(
K.equal(
K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1),
K.floatx())), K.floatx())
def top_k_categorical_accuracy(y_true, y_pred, k=5):
return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
# Aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine = cosine_proximity
def serialize(metric):
return metric.__name__
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
def get(identifier):
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'metric function identifier:', identifier)
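# Illustrative usage sketch (not part of the original module): `get` resolves
# string identifiers through `deserialize`, so, hypothetically:
#   get('mse')                  # returns the mean_squared_error function
#   get(mean_squared_error)     # callables are passed through unchanged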
| apache-2.0 |
dapengchen123/code_v1 | reid/datasets/market1501.py | 1 | 3563 | from __future__ import print_function, absolute_import
import os.path as osp
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(Dataset):
url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view'
md5 = '65005ab7d12ec1c44de4eeafe813e68a'
def __init__(self, root, split_id=0, num_val=0.3, download=False):
super(Market1501, self).__init__(root, split_id=split_id)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually from {} "
"to {}".format(self.url, fpath))
# Extract the file
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 1501 identities (+1 for background) with 6 camera views each
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
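        # Clarifying note: register() renames every copied image to
        # '<pid:08d>_<cam:02d>_<index:04d>.jpg' (e.g. '00000023_03_0007.jpg')
        # before placing it in images_dir.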
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
# Save meta information into a json file
meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6,
'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
# Save the only training / test split
splits = [{
'trainval': sorted(list(trainval_pids)),
'query': sorted(list(query_pids)),
'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
| mit |
larsmans/scipy | benchmarks/benchmarks/linalg_solve_toeplitz.py | 106 | 1170 | """Benchmark the solve_toeplitz solver (Levinson recursion)
"""
from __future__ import division, absolute_import, print_function
import numpy as np
try:
import scipy.linalg
except ImportError:
pass
from .common import Benchmark
class SolveToeplitz(Benchmark):
params = (
('float64', 'complex128'),
(100, 300, 1000),
('toeplitz', 'generic')
)
param_names = ('dtype', 'n', 'solver')
def setup(self, dtype, n, soltype):
random = np.random.RandomState(1234)
dtype = np.dtype(dtype)
# Sample a random Toeplitz matrix representation and rhs.
c = random.randn(n)
r = random.randn(n)
y = random.randn(n)
if dtype == np.complex128:
c = c + 1j*random.rand(n)
r = r + 1j*random.rand(n)
y = y + 1j*random.rand(n)
self.c = c
self.r = r
self.y = y
self.T = scipy.linalg.toeplitz(c, r=r)
def time_solve_toeplitz(self, dtype, n, soltype):
if soltype == 'toeplitz':
scipy.linalg.solve_toeplitz((self.c, self.r), self.y)
else:
scipy.linalg.solve(self.T, self.y)
| bsd-3-clause |
glenflet/ZtoRGBpy | ZtoRGBpy/_info.py | 1 | 2082 | # -*- coding: utf-8 -*-
# =================================================================================
# Copyright 2019 Glen Fletcher <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All documentation this file as docstrings or comments are licensed under the
# Creative Commons Attribution-ShareAlike 4.0 International License; you may
# not use this documentation except in compliance with this License.
# You may obtain a copy of this License at
#
# https://creativecommons.org/licenses/by-sa/4.0
#
# =================================================================================
"""
ZtoRGB information definition module
Special private module used for automatic processing, and inclusion
.. moduleauthor:: Glen Fletcher <[email protected]>
"""
__authors__ = [
("Glen Fletcher", "[email protected]")]
__copyright__ = "2019 Glen Fletcher"
__license__ = """\
The source code for this package is licensed under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0),
while the documentation including docstrings and comments embedded in the source code are licensed under the
[Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0)
"""
__contact__ = "Glen Fletcher <[email protected]>"
__version__ = "2.0"
__title__ = "ZtoRGBpy"
__desc__ = """\
Complex number to perceptually uniform RGB subset mapping library"""
__all__ = [
'__authors__', '__copyright__', '__license__',
'__contact__', '__version__', '__title__',
'__desc__']
| mit |
ElecProg/decmath | decmath/trig.py | 1 | 4598 | from decimal import getcontext, Decimal
from decmath import _pi, _to_Decimal, sign
# Trigonometric functions
def acos(x):
"""Return the arc cosine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: acos accepts -1 <= x <= 1.")
elif x == -1:
return _pi()
elif x == 0:
return _pi() / 2
elif x == 1:
return Decimal(0)
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, _pi() / 2 - x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s -= coeff * num
getcontext().prec -= 2
return +s
def asin(x):
"""Return the arc sine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: asin accepts -1 <= x <= 1.")
elif x == -1:
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == 1:
return _pi() / 2
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s += coeff * num
getcontext().prec -= 2
return +s
def atan(x):
"""Return the arc tangent (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif x == Decimal('-Inf'):
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == Decimal('Inf'):
return _pi() / 2
if x < -1:
c = _pi() / -2
x = 1 / x
elif x > 1:
c = _pi() / 2
x = 1 / x
else:
c = 0
getcontext().prec += 2
x_squared = x**2
y = x_squared / (1 + x_squared)
y_over_x = y / x
i, lasts, s, coeff, num = Decimal(0), 0, y_over_x, 1, y_over_x
while s != lasts:
lasts = s
i += 2
coeff *= i / (i + 1)
num *= y
s += coeff * num
if c:
s = c - s
getcontext().prec -= 2
return +s
def atan2(y, x):
"""Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered."""
y = _to_Decimal(y)
x = _to_Decimal(x)
abs_y = abs(y)
abs_x = abs(x)
y_is_real = abs_y != Decimal('Inf')
if y.is_nan() or x.is_nan():
return Decimal("NaN")
if x:
if y_is_real:
a = y and atan(y / x) or Decimal(0)
if x < 0:
a += sign(y) * _pi()
return a
elif abs_y == abs_x:
x = sign(x)
y = sign(y)
return _pi() * (Decimal(2) * abs(x) - x) / (Decimal(4) * y)
if y:
return atan(sign(y) * Decimal('Inf'))
elif sign(x) < 0:
return sign(y) * _pi()
else:
return sign(y) * Decimal(0)
def cos(x):
"""Return the cosine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2 or x == 3 * _pi() / 2:
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 0, 0, 1, 1, 1, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def hypot(x, y):
"""Return the Euclidean distance, sqrt(x*x + y*y)."""
return (_to_Decimal(x).__pow__(2) + _to_Decimal(y).__pow__(2)).sqrt()
def sin(x):
"""Return the sine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == 0 or x == _pi():
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def tan(x):
"""Return the tangent of x (measured in radians)."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2:
return Decimal('Inf')
elif x == 3 * _pi() / 2:
return Decimal('-Inf')
return sin(x) / cos(x)
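# Illustrative usage sketch (not part of the original module); results follow the
# active decimal context precision:
#   sin(Decimal(0))                # -> Decimal(0)
#   atan2(Decimal(1), Decimal(1))  # -> pi/4 at the current precision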
| mit |
jasonwee/asus-rt-n14uhp-mrtg | tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/django/db/backends/postgresql/operations.py | 149 | 10182 | from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
return field_name, params
def datetime_cast_date_sql(self, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = '(%s)::date' % field_name
return sql, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
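        # For illustration, a sequence entry {'table': 'foo', 'column': 'id'}
        # yields, with the default (uncolored) style, roughly:
        #   SELECT setval(pg_get_serial_sequence('"foo"','id'), 1, false);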
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.remote_field.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
| apache-2.0 |
martindurant/astrobits | time_series.py | 1 | 12543 | """Take a list of files and known star coordinates, and
perform photometry on them all, either with apertures (phot)
or by PSF fitting (daophot, which requires additional
parameters and is appropriate to poor S/N or crowded fields).
Makes extensive use of iraf tasks; set all photometry parameters
before running:
datapars - for data characteristics
centerpars - finding the reference star on each image.
centerpars, photpars, fitskypars - for controlling aperture photometry
daopars - for controlling daophot
filelist: set of image files, in IRAF syntax (image.fits[1][*,*,2] etc);
can be more than one per cube.
coords: name a file containing all star coords for photometry, based on
an image unshifted relative to (0,0) in the shifts list. Be pure numbers
for phot method, .mag or .als for daophot method.
shifts: name a file containing shifts, a tuple of shift arrays, or image
header keywords (a tuple of two); use None for no shifts
refstar: coords of star for deriving (x,y) offset, as in coords
timestamp: source of the timing information: a header keyword, delta-t
for uniform sampling or a file with times (in whatever format you'll be
using later.
psf: whether to use daophot or aperture phot for analysis. If this is a
filename, that is the PSF profile to use for every image; if it is "True",
make a new PSF for every image. Pars below only for full PSF fitting
pststars: a .pst file from daophot, listing the IDs of stars for making
the PSF for each image. NB: DAOphot refuses to measure any star with SNR<2.
ids: which stars are interesting, by ID (in input coord list order)
coords: starting well-measured coords (pdump-ed from a .als, perhaps).
"""
import os
import numpy
from glob import glob
import pyfits
from pylab import find
from numpy import load,vstack,save,median
thisdir = os.getcwd()
os.chdir("/home/durant")
from pyraf import iraf
iraf.cd(thisdir)
iraf.digiphot()
iraf.daophot()
import pyraf
import pyfits
import numpy as n
def shift_file_coords(filename,xshift,yshift,output,sort=None):
"""Understands filetypes: 2-column ascii numbers, .mag, .als, .pst.
NB: shift means where each image is, relative to the original (not where
it should be moved to).
"""
if not(sort):
sort = 'num'
if filename.find('.mag')>0: sort = 'mag'
if filename.find('.als')>0: sort = 'als'
if filename.find('.pst')>0: sort = 'pst'
if not(sort=='num' or sort=='mag' or sort=='als' or sort=='pst'):
raise ValueError('Unknown input filetype: %s'%filename)
if sort=='num': # shift 2-column numeric ASCII table
x,y = load(filename,usecols=[0,1],unpack=True)
x += xshift
y += yshift
X = vstack((x,y))
save(output,X.transpose())
return
if sort=='mag': #shift a .mag photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==9 and line[0]!='#':
x = float(line.split()[0]) + xshift
y = float(line.split()[1]) + yshift
line = "%-14.3f %-11.3f"%(x,y)+line[21:]
freda.write(line)
if sort=='als': #shift a .als DAOphot photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==8 and line[0]!='#':
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
if sort=='pst': #shift a PSF star list for DAOphot
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line[0]!="#":
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
fred.close()
freda.close()
def recentre(image,refcoordfile):
"""Returns improved shift by centroiding
on the reference star using phot. This can be VERY
sensitive to the parameters in centerpars."""
xin,yin = load(refcoordfile,unpack=True)
try:
iraf.phot(image,refcoordfile,'temp.mag',inter="no",calgorithm='centroid',
mode='h',verify='no',update='no',verbose='no')
xout,yout=iraf.pdump('temp.mag','xcen,ycen','yes',Stdout=1)[0].split()
except:
print "Recentring failed on", image
return 0.,0.
xout,yout = float(xout),float(yout)
return xout-xin,yout-yin
vary_par = 1.
vary_max = 10
vary_min = 6
vary_fwhm= 0
def setaperture(image,refstar):
"""Measure the FWHM of the reference star unsing simple DAOphot editor
and then set the photometry aperture to this number"""
x,y = load(refstar,unpack=True)
fred = open('tempaperfile','w')
fred.write("%f %f 100 a\nq"%(x,y))
fred.close()
try:
output=iraf.daoedit(image,icomm='tempaperfile',Stdout=1,Stderr=1)
except:
print "Aperture setting failed on",image
return
FWHM = float(output[3].split()[4])
iraf.photpars.apertures = min(max(FWHM*vary_par,vary_min),vary_max)
iraf.daopars.fitrad = min(max(FWHM*vary_par,vary_min),vary_max)
global vary_fwhm
vary_fwhm = FWHM
print "FWHM: ", FWHM, " aperture: ",iraf.photpars.apertures
def apphot(image,coords,refstar=None,centre=False,vary=False):
"""Apperture photometry with centering based on a reference star.
NB: centre refers to shifting the coordinates by centroiding on the
reference star; recentering on the final phot depends on
    centerpars.calgorithm."""
iraf.dele('temp.mag*')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine centring: ", xsh,ysh
    else: #no recentring by reference star (but could still have calgorithm!=none)
xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords')
iraf.phot(image,'tempcoords','temp.mag2',inter="no",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.mag2','id,flux,msky,stdev','yes',Stdout=1)
return out
def psfphot(image,coords,pststars,refstar,centre=True,vary=False):
"""PSF photometry. Centering is through phot on refstar.
Assume coords is a .als file for now. Recentering is always done
for the reference star, never for the targets."""
iraf.dele('temp.mag*')
iraf.dele('temp.psf.fits')
iraf.dele('temp.als')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
shift_file_coords(pststars,xsh,ysh,'temppst2',sort='pst')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.psf(image,'temp.mag2','temppst2','temp.psf','temp.mag.pst','temp.mag.psg',
inter='no',mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2','temp.psf','temp.als','temp.mag.arj',"default",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def simplepsfphot(image,coords,psf,refstar,centre=True,vary=False):
"""PSF photometry, with a given PSF file in psf used for every image"""
iraf.dele('temp.mag*')
iraf.dele('temp.als')
iraf.dele('temp.sub.fits')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2',psf,'temp.als','temp.mag.arj','temp.sub.fits',
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def custom1(filename): # for NACO timing mode cubes - removes horizontal banding
#iraf.imarith(filename,'-','dark','temp')
iraf.imarith(filename,'/','flatK','temp')
im = pyfits.getdata('temp.fits')
med = median(im.transpose())
out = ((im).transpose()-med).transpose()
(pyfits.ImageHDU(out)).writeto("temp2.fits",clobber=True)
iraf.imdel('temp')
iraf.imcopy('temp2[1]','temp')
def get_id(starid,output='output'):
"""from the output of the photometry, grab the magnitudes and magerrs of starid"""
mag = load(output,usecols=[4+starid*4])
merr= load(output,usecols=[5+starid*4])
return mag,merr
def run(filelist,coords,refstar,shifts=None,centre=False,psf=False,pststars=None,
ids=None,dark=0,flat=1,timestamp="TIME",output='output',custom_process=None,
vary=False):
"""If psf==True, must include all extra par files.
    If PSF is a filename (.psf.fits), this profile is used to fit every image.
Timestamp can be either a file of times (same length as filelist), a header
keyword, or an array of times.
The input list can include [] notation for multiple extensions or sections
of each file (incompatible with header-based time-stamps).
    custom_process(file) is a function taking a filename (possibly including [x]
syntax) and places a processed image in temp.fits."""
output = open(output,'w')
x = load(coords,usecols=[1])
numstars = len(x)
myfiles = open(filelist).readlines()
myfiles = [myfiles[i][:-1] for i in range(len(myfiles))]
if timestamp.__class__ == numpy.ndarray: #--sort out times--
times = 1 #times=1 means we know the times beforehand
elif len(glob(timestamp))>0:
timestamp = load(timestamp,usecols=[0])
times=1
else:
        times=0 #times=0 means find the time from each image
if type(shifts)==type(" "): #--sort out shifts--
        xshifts,yshifts = load(shifts,unpack=True)#filename given, assuming 2 columns
xshifts,yshifts = -xshifts,-yshifts #these are in the opposite sense to coords from stack
elif n.iterable(shifts):
xshifts=n.array(shifts[0]) #for shifts given as arrays/lists
yshifts=n.array(shifts[1])
else:
print "No shifts" #assume all shifts are zero
xshifts = n.zeros(len(myfiles))
yshifts = n.zeros(len(myfiles))
for i,thisfile in enumerate(myfiles): #run!
print i,thisfile
if times:
time = timestamp[i] #known time
else:
time = pyfits.getval(thisfile,timestamp) #FITS keyword
try:
iraf.dele('temp.fits')
if custom_process: #arbitrary subroutine to process a file -> temp.fits
custom_process(thisfile)
else: #typical dark/bias subtract and flatfield
iraf.imarith(thisfile,'-',dark,'temp')
iraf.imarith('temp','/',flat,'temp')
shift_file_coords(coords,xshifts[i],yshifts[i],'tempcoords') #apply coarse shifts
shift_file_coords(refstar,xshifts[i],yshifts[i],'tempref',sort='num')
if psf:
if psf is True: #full PSF fit
shift_file_coords(pststars,xshifts[i],yshifts[i],'temppst')
out=psfphot('temp.fits','tempcoords','temppst','tempref',centre,vary)
else: #DAOphot with known PSF
out=simplepsfphot('temp.fits','tempcoords',psf,'tempref',centre,vary)
else: #aperture photometry
out=apphot('temp.fits','tempcoords','tempref',centre,vary=vary)
output.write("%s %s %s "%(thisfile,time,vary_fwhm))
myids = n.array([int(out[i].split()[0]) for i in range(len(out))])
for i in ids or range(numstars):
try: #search for each requested ID
foundid = find(myids==i)[0]
output.write(out[foundid]+" ")
except: #ID not found
output.write(" 0 0 0 0 ")
output.write("\n")
except KeyboardInterrupt: #exit on Ctrl-C
break
except pyraf.irafglobals.IrafError, err:
print "IRAF error ",err,thisfile
break
except ValueError, err:
print "Value error ",err,thisfile
raise
output.close()
#iraf.dele('temp*')
| mit |
dunkhong/grr | grr/server/grr_response_server/databases/db_yara_test_lib.py | 1 | 1573 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible, it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
| apache-2.0 |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/clang/scripts/run_tool.py | 10 | 11655 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to help run clang tools across Chromium code.
How to use this tool:
If you want to run the tool across all Chromium code:
run_tool.py <tool> <path/to/compiledb>
If you want to include all files mentioned in the compilation database:
run_tool.py <tool> <path/to/compiledb> --all
If you want to run the tool across only chrome/browser and content/browser:
run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
Please see https://code.google.com/p/chromium/wiki/ClangToolRefactoring for more
information, which documents the entire automated refactoring flow in Chromium.
Why use this tool:
The clang tool implementation doesn't take advantage of multiple cores, and if
it fails mysteriously in the middle, all the generated replacements will be
lost.
Unfortunately, if the work is simply sharded across multiple cores by running
multiple RefactoringTools, problems arise when they attempt to rewrite a file at
the same time. To work around that, clang tools that are run using this tool
should output edits to stdout in the following format:
==== BEGIN EDITS ====
r:<file path>:<offset>:<length>:<replacement text>
r:<file path>:<offset>:<length>:<replacement text>
...etc...
==== END EDITS ====
Any generated edits are applied once the clang tool has finished running
across Chromium, regardless of whether some instances failed or not.
"""
import argparse
import collections
import functools
import json
import multiprocessing
import os.path
import subprocess
import sys
Edit = collections.namedtuple('Edit',
('edit_type', 'offset', 'length', 'replacement'))
def _GenerateCompileDatabase(path):
"""Generates a compile database.
Note: requires ninja.
Args:
path: The build directory to generate a compile database for.
"""
# TODO(dcheng): Incorporate Windows-specific compile DB munging from
# https://codereview.chromium.org/718873004
print 'Generating compile database in %s...' % path
args = ['ninja', '-C', path, '-t', 'compdb', 'cc', 'cxx', 'objc', 'objcxx']
output = subprocess.check_output(args)
with file(os.path.join(path, 'compile_commands.json'), 'w') as f:
f.write(output)
def _GetFilesFromGit(paths=None):
"""Gets the list of files in the git repository.
Args:
paths: Prefix filter for the returned paths. May contain multiple entries.
"""
args = []
if sys.platform == 'win32':
args.append('git.bat')
else:
args.append('git')
args.append('ls-files')
if paths:
args.extend(paths)
command = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _ = command.communicate()
return [os.path.realpath(p) for p in output.splitlines()]
def _GetFilesFromCompileDB(build_directory):
""" Gets the list of files mentioned in the compilation database.
Args:
build_directory: Directory that contains the compile database.
"""
compiledb_path = os.path.join(build_directory, 'compile_commands.json')
with open(compiledb_path, 'rb') as compiledb_file:
json_commands = json.load(compiledb_file)
return [os.path.join(entry['directory'], entry['file'])
for entry in json_commands]
def _ExtractEditsFromStdout(build_directory, stdout):
"""Extracts generated list of edits from the tool's stdout.
The expected format is documented at the top of this file.
Args:
build_directory: Directory that contains the compile database. Used to
normalize the filenames.
stdout: The stdout from running the clang tool.
Returns:
A dictionary mapping filenames to the associated edits.
"""
lines = stdout.splitlines()
start_index = lines.index('==== BEGIN EDITS ====')
end_index = lines.index('==== END EDITS ====')
edits = collections.defaultdict(list)
for line in lines[start_index + 1:end_index]:
try:
edit_type, path, offset, length, replacement = line.split(':::', 4)
replacement = replacement.replace('\0', '\n')
# Normalize the file path emitted by the clang tool.
path = os.path.realpath(os.path.join(build_directory, path))
edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
except ValueError:
print 'Unable to parse edit: %s' % line
return edits
def _ExecuteTool(toolname, build_directory, filename):
"""Executes the tool.
This is defined outside the class so it can be pickled for the multiprocessing
module.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filename: The file to run the tool over.
Returns:
A dictionary that must contain the key "status" and a boolean value
associated with it.
If status is True, then the generated edits are stored with the key "edits"
in the dictionary.
Otherwise, the filename and the output from stderr are associated with the
keys "filename" and "stderr" respectively.
"""
command = subprocess.Popen(
(toolname, '-p', build_directory, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = command.communicate()
if command.returncode != 0:
return {'status': False, 'filename': filename, 'stderr': stderr}
else:
return {'status': True,
'edits': _ExtractEditsFromStdout(build_directory, stdout)}
class _CompilerDispatcher(object):
"""Multiprocessing controller for running clang tools in parallel."""
def __init__(self, toolname, build_directory, filenames):
"""Initializer method.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filenames: The files to run the tool over.
"""
self.__toolname = toolname
self.__build_directory = build_directory
self.__filenames = filenames
self.__success_count = 0
self.__failed_count = 0
self.__edit_count = 0
self.__edits = collections.defaultdict(list)
@property
def edits(self):
return self.__edits
@property
def failed_count(self):
return self.__failed_count
def Run(self):
"""Does the grunt work."""
pool = multiprocessing.Pool()
result_iterator = pool.imap_unordered(
functools.partial(_ExecuteTool, self.__toolname,
self.__build_directory), self.__filenames)
for result in result_iterator:
self.__ProcessResult(result)
sys.stdout.write('\n')
sys.stdout.flush()
def __ProcessResult(self, result):
"""Handles result processing.
Args:
result: The result dictionary returned by _ExecuteTool.
"""
if result['status']:
self.__success_count += 1
for k, v in result['edits'].iteritems():
self.__edits[k].extend(v)
self.__edit_count += len(v)
else:
self.__failed_count += 1
sys.stdout.write('\nFailed to process %s\n' % result['filename'])
sys.stdout.write(result['stderr'])
sys.stdout.write('\n')
percentage = (float(self.__success_count + self.__failed_count) /
len(self.__filenames)) * 100
sys.stdout.write('Succeeded: %d, Failed: %d, Edits: %d [%.2f%%]\r' %
(self.__success_count, self.__failed_count,
self.__edit_count, percentage))
sys.stdout.flush()
def _ApplyEdits(edits):
"""Apply the generated edits.
Args:
edits: A dict mapping filenames to Edit instances that apply to that file.
"""
edit_count = 0
for k, v in edits.iteritems():
# Sort the edits and iterate through them in reverse order. Sorting allows
# duplicate edits to be quickly skipped, while reversing means that
# subsequent edits don't need to have their offsets updated with each edit
# applied.
v.sort()
last_edit = None
with open(k, 'rb+') as f:
contents = bytearray(f.read())
for edit in reversed(v):
if edit == last_edit:
continue
last_edit = edit
contents[edit.offset:edit.offset + edit.length] = edit.replacement
if not edit.replacement:
_ExtendDeletionIfElementIsInList(contents, edit.offset)
edit_count += 1
f.seek(0)
f.truncate()
f.write(contents)
print 'Applied %d edits to %d files' % (edit_count, len(edits))
_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
def _ExtendDeletionIfElementIsInList(contents, offset):
"""Extends the range of a deletion if the deleted element was part of a list.
This rewriter helper makes it easy for refactoring tools to remove elements
from a list. Even if a matcher callback knows that it is removing an element
from a list, it may not have enough information to accurately remove the list
element; for example, another matcher callback may end up removing an adjacent
list element, or all the list elements may end up being removed.
With this helper, refactoring tools can simply remove the list element and not
worry about having to include the comma in the replacement.
Args:
contents: A bytearray with the deletion already applied.
offset: The offset in the bytearray where the deleted range used to be.
"""
char_before = char_after = None
left_trim_count = 0
for byte in reversed(contents[:offset]):
left_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte in (ord(','), ord(':'), ord('('), ord('{')):
char_before = chr(byte)
break
right_trim_count = 0
for byte in contents[offset:]:
right_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte == ord(','):
char_after = chr(byte)
break
if char_before:
if char_after:
del contents[offset:offset + right_trim_count]
elif char_before in (',', ':'):
del contents[offset - left_trim_count:offset]
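# Rough worked example of the helper above: deleting the final element "bar" from
# "f(foo, bar)" first leaves "f(foo, )"; the helper then trims the dangling ", "
# so the contents become "f(foo)".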
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tool', help='clang tool to run')
parser.add_argument('--all', action='store_true')
parser.add_argument(
'--generate-compdb',
action='store_true',
help='regenerate the compile database before running the tool')
parser.add_argument(
'compile_database',
help='path to the directory that contains the compile database')
parser.add_argument(
'path_filter',
nargs='*',
help='optional paths to filter what files the tool is run on')
args = parser.parse_args()
if args.generate_compdb:
_GenerateCompileDatabase(args.compile_database)
if args.all:
filenames = set(_GetFilesFromCompileDB(args.compile_database))
source_filenames = filenames
else:
filenames = set(_GetFilesFromGit(args.path_filter))
# Filter out files that aren't C/C++/Obj-C/Obj-C++.
extensions = frozenset(('.c', '.cc', '.cpp', '.m', '.mm'))
source_filenames = [f
for f in filenames
if os.path.splitext(f)[1] in extensions]
dispatcher = _CompilerDispatcher(args.tool, args.compile_database,
source_filenames)
dispatcher.Run()
# Filter out edits to files that aren't in the git repository, since it's not
# useful to modify files that aren't under source control--typically, these
# are generated files or files in a git submodule that's not part of Chromium.
_ApplyEdits({k: v
for k, v in dispatcher.edits.iteritems()
if os.path.realpath(k) in filenames})
return -dispatcher.failed_count
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/find-commit-for-patch.py | 53 | 3327 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import sys
def GetArgs():
parser = argparse.ArgumentParser(
description="Finds a commit that a given patch can be applied to. "
"Does not actually apply the patch or modify your checkout "
"in any way.")
parser.add_argument("patch_file", help="Patch file to match")
parser.add_argument(
"--branch", "-b", default="origin/master", type=str,
help="Git tree-ish where to start searching for commits, "
"default: %(default)s")
parser.add_argument(
"--limit", "-l", default=500, type=int,
help="Maximum number of commits to search, default: %(default)s")
parser.add_argument(
"--verbose", "-v", default=False, action="store_true",
help="Print verbose output for your entertainment")
return parser.parse_args()
def FindFilesInPatch(patch_file):
files = {}
next_file = ""
with open(patch_file) as patch:
for line in patch:
if line.startswith("diff --git "):
# diff --git a/src/objects.cc b/src/objects.cc
words = line.split()
assert words[2].startswith("a/") and len(words[2]) > 2
next_file = words[2][2:]
elif line.startswith("index "):
# index add3e61..d1bbf6a 100644
hashes = line.split()[1]
old_hash = hashes.split("..")[0]
if old_hash.startswith("0000000"): continue # Ignore new files.
files[next_file] = old_hash
return files
def GetGitCommitHash(treeish):
cmd = ["git", "log", "-1", "--format=%H", treeish]
return subprocess.check_output(cmd).strip()
def CountMatchingFiles(commit, files):
matched_files = 0
# Calling out to git once and parsing the result Python-side is faster
# than calling 'git ls-tree' for every file.
cmd = ["git", "ls-tree", "-r", commit] + [f for f in files]
output = subprocess.check_output(cmd)
for line in output.splitlines():
# 100644 blob c6d5daaa7d42e49a653f9861224aad0a0244b944 src/objects.cc
_, _, actual_hash, filename = line.split()
expected_hash = files[filename]
if actual_hash.startswith(expected_hash): matched_files += 1
return matched_files
def FindFirstMatchingCommit(start, files, limit, verbose):
commit = GetGitCommitHash(start)
num_files = len(files)
if verbose: print(">>> Found %d files modified by patch." % num_files)
for _ in range(limit):
matched_files = CountMatchingFiles(commit, files)
if verbose: print("Commit %s matched %d files" % (commit, matched_files))
if matched_files == num_files:
return commit
commit = GetGitCommitHash("%s^" % commit)
print("Sorry, no matching commit found. "
"Try running 'git fetch', specifying the correct --branch, "
"and/or setting a higher --limit.")
sys.exit(1)
if __name__ == "__main__":
args = GetArgs()
files = FindFilesInPatch(args.patch_file)
commit = FindFirstMatchingCommit(args.branch, files, args.limit, args.verbose)
if args.verbose:
print(">>> Matching commit: %s" % commit)
print(subprocess.check_output(["git", "log", "-1", commit]))
print(">>> Kthxbai.")
else:
print(commit)
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/internet/test/test_threads.py | 39 | 7983 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorThreads}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
from weakref import ref
import gc, threading
from twisted.python.threadable import isInIOThread
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.threadpool import ThreadPool
from twisted.internet.interfaces import IReactorThreads
class ThreadTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorThreads}.
"""
requiredInterfaces = (IReactorThreads,)
def test_getThreadPool(self):
"""
C{reactor.getThreadPool()} returns an instance of L{ThreadPool} which
starts when C{reactor.run()} is called and stops before it returns.
"""
state = []
reactor = self.buildReactor()
pool = reactor.getThreadPool()
self.assertIsInstance(pool, ThreadPool)
self.assertFalse(
pool.started, "Pool should not start before reactor.run")
def f():
# Record the state for later assertions
state.append(pool.started)
state.append(pool.joined)
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor, 2)
self.assertTrue(
state[0], "Pool should start after reactor.run")
self.assertFalse(
state[1], "Pool should not be joined before reactor.stop")
self.assertTrue(
pool.joined,
"Pool should be stopped after reactor.run returns")
def test_suggestThreadPoolSize(self):
"""
C{reactor.suggestThreadPoolSize()} sets the maximum size of the reactor
threadpool.
"""
reactor = self.buildReactor()
reactor.suggestThreadPoolSize(17)
pool = reactor.getThreadPool()
self.assertEqual(pool.max, 17)
def test_delayedCallFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from a delayed call is run immediately in the next reactor iteration.
When invoked from the reactor thread, previous implementations of
L{IReactorThreads.callFromThread} would skip the pipe/socket based wake
up step, assuming the reactor would wake up on its own. However, this
        resulted in the reactor not noticing an insert into the thread queue at
the right time (in this case, after the thread queue has been processed
for that reactor iteration).
"""
reactor = self.buildReactor()
def threadCall():
reactor.stop()
# Set up the use of callFromThread being tested.
reactor.callLater(0, reactor.callFromThread, threadCall)
before = reactor.seconds()
self.runReactor(reactor, 60)
after = reactor.seconds()
# We specified a timeout of 60 seconds. The timeout code in runReactor
# probably won't actually work, though. If the reactor comes out of
# the event notification API just a little bit early, say after 59.9999
# seconds instead of after 60 seconds, then the queued thread call will
# get processed but the timeout delayed call runReactor sets up won't!
# Then the reactor will stop and runReactor will return without the
# timeout firing. As it turns out, select() and poll() are quite
# likely to return *slightly* earlier than we ask them to, so the
# timeout will rarely happen, even if callFromThread is broken. So,
# instead we'll measure the elapsed time and make sure it's something
# less than about half of the timeout we specified. This is heuristic.
# It assumes that select() won't ever return after 30 seconds when we
# asked it to timeout after 60 seconds. And of course like all
# time-based tests, it's slightly non-deterministic. If the OS doesn't
# schedule this process for 30 seconds, then the test might fail even
# if callFromThread is working.
self.assertTrue(after - before < 30)
def test_callFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from another thread is run in the reactor thread.
"""
reactor = self.buildReactor()
result = []
def threadCall():
result.append(threading.currentThread())
reactor.stop()
reactor.callLater(0, reactor.callInThread,
reactor.callFromThread, threadCall)
self.runReactor(reactor, 5)
self.assertEqual(result, [threading.currentThread()])
def test_stopThreadPool(self):
"""
When the reactor stops, L{ReactorBase._stopThreadPool} drops the
reactor's direct reference to its internal threadpool and removes
the associated startup and shutdown triggers.
This is the case of the thread pool being created before the reactor
is run.
"""
reactor = self.buildReactor()
threadpool = ref(reactor.getThreadPool())
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
gc.collect()
self.assertIs(threadpool(), None)
def test_stopThreadPoolWhenStartedAfterReactorRan(self):
"""
We must handle the case of shutting down the thread pool when it was
started after the reactor was run in a special way.
Some implementation background: The thread pool is started with
callWhenRunning, which only returns a system trigger ID when it is
invoked before the reactor is started.
This is the case of the thread pool being created after the reactor
is started.
"""
reactor = self.buildReactor()
threadPoolRefs = []
def acquireThreadPool():
threadPoolRefs.append(ref(reactor.getThreadPool()))
reactor.stop()
reactor.callWhenRunning(acquireThreadPool)
self.runReactor(reactor)
gc.collect()
self.assertIs(threadPoolRefs[0](), None)
def test_cleanUpThreadPoolEvenBeforeReactorIsRun(self):
"""
When the reactor has its shutdown event fired before it is run, the
thread pool is completely destroyed.
For what it's worth, the reason we support this behavior at all is
because Trial does this.
This is the case of the thread pool being created without the reactor
being started at al.
"""
reactor = self.buildReactor()
threadPoolRef = ref(reactor.getThreadPool())
reactor.fireSystemEvent("shutdown")
gc.collect()
self.assertIs(threadPoolRef(), None)
def test_isInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{True} if it is
called in the thread the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.stop()
reactor.callWhenRunning(check)
self.runReactor(reactor)
self.assertEqual([True], results)
def test_isNotInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{False} if it is
called in a different thread than the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.callFromThread(reactor.stop)
reactor.callInThread(check)
self.runReactor(reactor)
self.assertEqual([False], results)
globals().update(ThreadTestsBuilder.makeTestCaseClasses())
| mit |
CuonDeveloper/cuon | cuon_client/Client/CUON/cuon/Web2/SingleWeb2.py | 3 | 1723 | # -*- coding: utf-8 -*-
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import pygtk
pygtk.require('2.0')
import gtk
#import gtk.glade
import gobject
class SingleWeb2(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "web2"
self.xmlTableDef = 0
# self.loadTable()
# self.saveTable()
self.loadTable(allTables)
self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
self.listHeader['names'] = ['title', 'designation', 'ID']
self.listHeader['size'] = [25,10,25,25,10]
print "number of Columns "
print len(self.table.Columns)
#
self.cType = 'html'
def readNonWidgetEntries(self, dicValues):
print 'readNonWidgetEntries(self) by SingleWeb2'
dicValues['ctype'] = [self.cType, 'string']
        return dicValues
| gpl-3.0 |
daxxi13/CouchPotatoServer | libs/pyutil/benchmarks/bench_xor.py | 106 | 1658 | #!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import hmac, sys, random
from pyutil.assertutil import _assert
from pyutil.xor import xor
from pyutil import benchfunc
from pyutil import randutil
SFUNCS = [hmac._strxor, xor.py_xor,]
SFNAMES = ["hmac", "pyutil py",]
inputs = {}
def _help_init_string(N):
global inputs
if not inputs.has_key(N):
inputs[N] = [randutil.insecurerandstr(N), randutil.insecurerandstr(N),]
def _help_make_bench_xor(f):
def g(n):
assert inputs.has_key(n)
_assert(isinstance(inputs[n][0], str), "Required to be a string.", inputs[n][0])
assert len(inputs[n][0]) == n
_assert(isinstance(inputs[n][1], str), "Required to be a string.", inputs[n][1])
assert len(inputs[n][1]) == n
for SF in SFUNCS:
assert f(inputs[n][0], inputs[n][1]) == SF(inputs[n][0], inputs[n][1])
return f(inputs[n][0], inputs[n][1])
return g
def bench(SETSIZES=[2**x for x in range(0, 22, 3)]):
random.seed(0)
if len(SFUNCS) <= 1: print ""
maxnamel = max(map(len, SFNAMES))
for SETSIZE in SETSIZES:
seed = random.random()
# print "seed: ", seed
random.seed(seed)
i = 0
if len(SFUNCS) > 1: print ""
for FUNC in SFUNCS:
funcname = SFNAMES[i] + " " * (maxnamel - len(SFNAMES[i]))
print "%s" % funcname,
sys.stdout.flush()
benchfunc.rep_bench(_help_make_bench_xor(FUNC), SETSIZE, initfunc=_help_init_string, MAXREPS=2**9, MAXTIME=30)
i = i + 1
bench()
| gpl-3.0 |
AdamWill/blivet | blivet/populator/helpers/devicepopulator.py | 6 | 2082 | # populator/helpers/devicepopulator.py
# Base class for device-type-specific helpers for populating a DeviceTree.
#
# Copyright (C) 2009-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU Lesser General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY expressed or implied, including the implied
# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks
# that are incorporated in the source code or documentation are not subject
# to the GNU Lesser General Public License and may only be used or
# replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
from .populatorhelper import PopulatorHelper
from ... import udev
# pylint: disable=abstract-method
class DevicePopulator(PopulatorHelper):
""" Populator helper base class for devices.
Subclasses must define a match method and, if they want to instantiate
a device, a run method.
"""
@classmethod
def match(cls, data):
return False
def _handle_rename(self):
name = udev.device_get_name(self.data)
if self.device.name != name:
self.device.name = name
# TODO: update name registry -- better yet, generate the name list on demand
def _handle_resize(self):
old_size = self.device.current_size
self.device.update_size()
if old_size != self.device.current_size:
self._devicetree.cancel_disk_actions(self.device.disks)
def update(self):
self._handle_rename()
self._handle_resize()
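# Illustrative sketch (not part of blivet): the class docstring above says
# subclasses must define match() and, if they want to instantiate a device,
# run().  The minimal shape implied by that contract looks roughly like the
# following; the populator name and the "example" prefix are hypothetical.
#
#   class ExampleDevicePopulator(DevicePopulator):
#       @classmethod
#       def match(cls, data):
#           # decide from the udev data whether this helper applies
#           return udev.device_get_name(data).startswith("example")
#
#       def run(self):
#           # build and return the appropriate device instance here
#           return None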
| lgpl-2.1 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/CherryPy-3.8.0/cherrypy/tutorial/tut03_get_and_post.py | 22 | 1719 | """
Tutorial - Passing variables
This tutorial shows you how to pass GET/POST variables to methods.
"""
import cherrypy
class WelcomePage:
def index(self):
# Ask for the user's name.
return '''
<form action="greetUser" method="GET">
What is your name?
<input type="text" name="name" />
<input type="submit" />
</form>'''
index.exposed = True
def greetUser(self, name=None):
# CherryPy passes all GET and POST variables as method parameters.
# It doesn't make a difference where the variables come from, how
# large their contents are, and so on.
#
# You can define default parameter values as usual. In this
# example, the "name" parameter defaults to None so we can check
# if a name was actually specified.
if name:
# Greet the user!
return "Hey %s, what's up?" % name
else:
if name is None:
# No name was specified
return 'Please enter your name <a href="./">here</a>.'
else:
return 'No, really, enter your name <a href="./">here</a>.'
greetUser.exposed = True
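    # For example, submitting the form above with "Ada" typed into the name
    # field issues a GET request to /greetUser?name=Ada, which calls
    # greetUser(name='Ada') and returns "Hey Ada, what's up?".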
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(WelcomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(WelcomePage(), config=tutconf)
| mit |
pearsonlab/nipype | nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py | 10 | 1610 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..maths import SpatialFilter
def test_SpatialFilter_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
kernel_file=dict(argstr='%s',
position=5,
xor=['kernel_size'],
),
kernel_shape=dict(argstr='-kernel %s',
position=4,
),
kernel_size=dict(argstr='%.4f',
position=5,
xor=['kernel_file'],
),
nan2zeros=dict(argstr='-nan',
position=3,
),
operation=dict(argstr='-f%s',
mandatory=True,
position=6,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = SpatialFilter.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SpatialFilter_outputs():
output_map = dict(out_file=dict(),
)
outputs = SpatialFilter.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
oscaro/django | django/utils/feedgenerator.py | 78 | 16377 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
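# Illustrative examples (sketch only, not part of Django): for the naive
# datetime.datetime(2004, 5, 28, 12, 0, 0) the two helpers above produce
#
#   rfc2822_date(...)  ->  'Fri, 28 May 2004 12:00:00 -0000'
#   rfc3339_date(...)  ->  '2004-05-28T12:00:00Z'
#
# Aware datetimes get a real UTC offset (e.g. '+0200' / '+02:00') instead.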
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
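# Illustrative example (sketch only): for url 'http://example.com/posts/42#intro'
# and a date of 2004-05-28, get_tag_uri() returns
# 'tag:example.com,2004-05-28:/posts/42/intro'.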
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause |
Crypt0s/Ramen | fs_libs/ftputil/build/lib/ftputil/path.py | 2 | 7681 | # Copyright (C) 2003-2013, Stefan Schwarzer <[email protected]>
# See the file LICENSE for licensing terms.
"""
ftputil.path - simulate `os.path` for FTP servers
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import posixpath
import stat
import ftputil.compat
import ftputil.error
import ftputil.tool
# The `_Path` class shouldn't be used directly by clients of the
# ftputil library.
__all__ = []
class _Path(object):
"""
Support class resembling `os.path`, accessible from the `FTPHost`
object, e. g. as `FTPHost().path.abspath(path)`.
Hint: substitute `os` with the `FTPHost` object.
"""
# `_Path` needs to provide all methods of `os.path`.
# pylint: disable=too-many-instance-attributes
def __init__(self, host):
self._host = host
# Delegate these to the `posixpath` module.
# pylint: disable=invalid-name
pp = posixpath
self.dirname = pp.dirname
self.basename = pp.basename
self.isabs = pp.isabs
self.commonprefix = pp.commonprefix
self.split = pp.split
self.splitdrive = pp.splitdrive
self.splitext = pp.splitext
self.normcase = pp.normcase
self.normpath = pp.normpath
def abspath(self, path):
"""Return an absolute path."""
original_path = path
path = ftputil.tool.as_unicode(path)
if not self.isabs(path):
path = self.join(self._host.getcwd(), path)
return ftputil.tool.same_string_type_as(original_path,
self.normpath(path))
def exists(self, path):
"""Return true if the path exists."""
try:
lstat_result = self._host.lstat(
path, _exception_for_missing_path=False)
return lstat_result is not None
except ftputil.error.RootDirError:
return True
def getmtime(self, path):
"""
Return the timestamp for the last modification for `path`
as a float.
This will raise `PermanentError` if the path doesn't exist,
        but may raise other exceptions depending on the state of the
server (e. g. timeout).
"""
return self._host.stat(path).st_mtime
def getsize(self, path):
"""
Return the size of the `path` item as an integer.
This will raise `PermanentError` if the path doesn't exist,
        but may raise other exceptions depending on the state of the
server (e. g. timeout).
"""
return self._host.stat(path).st_size
@staticmethod
def join(*paths):
"""
Join the path component from `paths` and return the joined
path.
All of these paths must be either unicode strings or byte
strings. If not, `join` raises a `TypeError`.
"""
# These checks are implicitly done by Python 3, but not by
# Python 2.
all_paths_are_unicode = all(
(isinstance(path, ftputil.compat.unicode_type)
for path in paths))
all_paths_are_bytes = all(
(isinstance(path, ftputil.compat.bytes_type)
for path in paths))
if all_paths_are_unicode or all_paths_are_bytes:
return posixpath.join(*paths)
else:
# Python 3 raises this exception for mixed strings
# in `os.path.join`, so also use this exception.
raise TypeError(
"can't mix unicode strings and bytes in path components")
# Check whether a path is a regular file/dir/link. For the first
# two cases follow links (like in `os.path`).
#
# Implementation note: The previous implementations simply called
# `stat` or `lstat` and returned `False` if they ended with
# raising a `PermanentError`. That exception usually used to
# signal a missing path. This approach has the problem, however,
# that exceptions caused by code earlier in `lstat` are obscured
# by the exception handling in `isfile`, `isdir` and `islink`.
def isfile(self, path):
"""
Return true if the `path` exists and corresponds to a regular
file (no link).
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
# Workaround if we can't go up from the current directory
if path == self._host.getcwd():
return False
try:
stat_result = self._host.stat(
path, _exception_for_missing_path=False)
if stat_result is None:
return False
else:
return stat.S_ISREG(stat_result.st_mode)
except ftputil.error.RootDirError:
return False
def isdir(self, path):
"""
Return true if the `path` exists and corresponds to a
directory (no link).
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
# Workaround if we can't go up from the current directory
if path == self._host.getcwd():
return True
try:
stat_result = self._host.stat(
path, _exception_for_missing_path=False)
if stat_result is None:
return False
else:
return stat.S_ISDIR(stat_result.st_mode)
except ftputil.error.RootDirError:
return True
def islink(self, path):
"""
Return true if the `path` exists and is a link.
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
try:
lstat_result = self._host.lstat(
path, _exception_for_missing_path=False)
if lstat_result is None:
return False
else:
return stat.S_ISLNK(lstat_result.st_mode)
except ftputil.error.RootDirError:
return False
def walk(self, top, func, arg):
"""
Directory tree walk with callback function.
For each directory in the directory tree rooted at top
(including top itself, but excluding "." and ".."), call
func(arg, dirname, fnames). dirname is the name of the
directory, and fnames a list of the names of the files and
subdirectories in dirname (excluding "." and ".."). func may
modify the fnames list in-place (e.g. via del or slice
assignment), and walk will only recurse into the
subdirectories whose names remain in fnames; this can be used
to implement a filter, or to impose a specific order of
visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used,
e.g., to pass a filename pattern, or a mutable object designed
to accumulate statistics. Passing None for arg is common.
"""
top = ftputil.tool.as_unicode(top)
# This code (and the above documentation) is taken from
# `posixpath.py`, with slight modifications.
try:
names = self._host.listdir(top)
except OSError:
return
func(arg, top, names)
for name in names:
name = self.join(top, name)
try:
stat_result = self._host.lstat(name)
except OSError:
continue
if stat.S_ISDIR(stat_result[stat.ST_MODE]):
self.walk(name, func, arg)
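# Usage sketch (illustrative, not part of ftputil): collecting every file and
# directory name below a directory with the walk() callback defined above.
# `host` is assumed to be an already connected ftputil.FTPHost instance.
#
#   collected = []
#   def visit(arg, dirname, names):
#       arg.extend(host.path.join(dirname, name) for name in names)
#   host.path.walk('/some/dir', visit, collected)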
| gpl-3.0 |
netscaler/neutron | neutron/tests/unit/linuxbridge/test_defaults.py | 2 | 1671 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.plugins.linuxbridge.common import config # noqa
from neutron.tests import base
class ConfigurationTest(base.BaseTestCase):
def test_defaults(self):
self.assertEqual(2,
cfg.CONF.AGENT.polling_interval)
self.assertEqual(True,
cfg.CONF.AGENT.rpc_support_old_agents)
self.assertEqual('sudo',
cfg.CONF.AGENT.root_helper)
self.assertEqual('local',
cfg.CONF.VLANS.tenant_network_type)
self.assertEqual(0,
len(cfg.CONF.VLANS.network_vlan_ranges))
self.assertEqual(0,
len(cfg.CONF.LINUX_BRIDGE.
physical_interface_mappings))
self.assertEqual(False, cfg.CONF.VXLAN.enable_vxlan)
self.assertEqual(config.DEFAULT_VXLAN_GROUP,
cfg.CONF.VXLAN.vxlan_group)
self.assertEqual(0, len(cfg.CONF.VXLAN.local_ip))
self.assertEqual(False, cfg.CONF.VXLAN.l2_population)
| apache-2.0 |
weidnerm/pi-ws2812 | version.py | 10 | 2918 | #
# SConstruct
#
# Copyright (c) 2016 Jeremy Garff <jer @ jers.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import SCons, os
def version_flags(env):
if not env['V']:
env['VERSIONCOMSTR'] = 'Version ${TARGET}'
def version_builders(env):
def generate_version_header(target, source, env):
headername = os.path.basename(target[0].abspath)
headerdef = headername.replace('.', '_').replace('-', '_').upper()
try:
version = open(source[0].abspath, 'r').readline().strip().split('.')
except:
version = [ '0', '0', '0' ]
f = open(headername, 'w')
f.write('/* Auto Generated Header built by version.py - DO NOT MODIFY */\n')
f.write('\n')
f.write('#ifndef __%s__\n' % (headerdef))
f.write('#define __%s__\n' % (headerdef))
f.write('\n')
f.write('#define VERSION_MAJOR %s\n' % version[0])
f.write('#define VERSION_MINOR %s\n' % version[1])
f.write('#define VERSION_MICRO %s\n' % version[2])
f.write('\n')
f.write('#endif /* __%s__ */\n' % (headerdef))
f.close()
env.Append(BUILDERS = {
'Version' : SCons.Builder.Builder(
action = SCons.Action.Action(generate_version_header, '${VERSIONCOMSTR}'),
suffix = '.h',
),
})
def exists(env):
return 1
def generate(env, **kwargs):
[f(env) for f in (version_flags, version_builders)]
| bsd-2-clause |
xupit3r/askpgh | askbot/middleware/locale.py | 13 | 1027 | "Taken from django.middleware.locale: this is the locale selecting middleware that will look at accept headers"
from django.utils.cache import patch_vary_headers
from django.utils import translation
from askbot.conf import settings
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
def process_request(self, request):
language = settings.ASKBOT_LANGUAGE
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = translation.get_language()
#translation.deactivate()
return response
| gpl-3.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/rsa/pem.py | 216 | 3372 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions that load and write PEM-encoded files.'''
import base64
from rsa._compat import b, is_bytes
def _markers(pem_marker):
'''
Returns the start and end PEM markers
'''
if is_bytes(pem_marker):
pem_marker = pem_marker.decode('utf-8')
return (b('-----BEGIN %s-----' % pem_marker),
b('-----END %s-----' % pem_marker))
def load_pem(contents, pem_marker):
'''Loads a PEM file.
@param contents: the contents of the file to interpret
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
@return the base64-decoded content between the start and end markers.
@raise ValueError: when the content is invalid, for example when the start
marker cannot be found.
'''
(pem_start, pem_end) = _markers(pem_marker)
pem_lines = []
in_pem_part = False
for line in contents.splitlines():
line = line.strip()
# Skip empty lines
if not line:
continue
# Handle start marker
if line == pem_start:
if in_pem_part:
raise ValueError('Seen start marker "%s" twice' % pem_start)
in_pem_part = True
continue
# Skip stuff before first marker
if not in_pem_part:
continue
# Handle end marker
if in_pem_part and line == pem_end:
in_pem_part = False
break
# Load fields
if b(':') in line:
continue
pem_lines.append(line)
# Do some sanity checks
if not pem_lines:
raise ValueError('No PEM start marker "%s" found' % pem_start)
if in_pem_part:
raise ValueError('No PEM end marker "%s" found' % pem_end)
# Base64-decode the contents
pem = b('').join(pem_lines)
return base64.decodestring(pem)
def save_pem(contents, pem_marker):
'''Saves a PEM file.
@param contents: the contents to encode in PEM format
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
    @return the base64-encoded contents wrapped in the start and end markers.
'''
(pem_start, pem_end) = _markers(pem_marker)
b64 = base64.encodestring(contents).replace(b('\n'), b(''))
pem_lines = [pem_start]
for block_start in range(0, len(b64), 64):
block = b64[block_start:block_start + 64]
pem_lines.append(block)
pem_lines.append(pem_end)
pem_lines.append(b(''))
return b('\n').join(pem_lines)
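# Round-trip sketch (illustrative, not part of python-rsa): save_pem() wraps a
# base64 encoding of the payload in the requested markers, and load_pem()
# recovers the original bytes from that text.
#
#   >>> pem = save_pem(b('payload bytes'), 'RSA PRIVATE KEY')
#   >>> load_pem(pem, 'RSA PRIVATE KEY') == b('payload bytes')
#   True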
| mit |
beomyeol/models | street/python/errorcounter_test.py | 22 | 4913 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for errorcounter."""
import tensorflow as tf
import errorcounter as ec
class ErrorcounterTest(tf.test.TestCase):
def testComputeErrorRate(self):
"""Tests that the percent calculation works as expected.
"""
rate = ec.ComputeErrorRate(error_count=0, truth_count=0)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=1, truth_count=0)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=10, truth_count=1)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=0, truth_count=1)
self.assertEqual(rate, 0.0)
rate = ec.ComputeErrorRate(error_count=3, truth_count=12)
self.assertEqual(rate, 25.0)
def testCountErrors(self):
"""Tests that the error counter works as expected.
"""
truth_str = 'farm barn'
counts = ec.CountErrors(ocr_text=truth_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=0, truth_count=9, test_count=9))
# With a period on the end, we get a char error.
dot_str = 'farm barn.'
counts = ec.CountErrors(ocr_text=dot_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=1, truth_count=9, test_count=10))
counts = ec.CountErrors(ocr_text=truth_str, truth_text=dot_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=0, truth_count=10, test_count=9))
# Space is just another char.
no_space = 'farmbarn'
counts = ec.CountErrors(ocr_text=no_space, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=0, truth_count=9, test_count=8))
counts = ec.CountErrors(ocr_text=truth_str, truth_text=no_space)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=1, truth_count=8, test_count=9))
# Lose them all.
counts = ec.CountErrors(ocr_text='', truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=9, fp=0, truth_count=9, test_count=0))
counts = ec.CountErrors(ocr_text=truth_str, truth_text='')
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=9, truth_count=0, test_count=9))
def testCountWordErrors(self):
"""Tests that the error counter works as expected.
"""
truth_str = 'farm barn'
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=0, truth_count=2, test_count=2))
# With a period on the end, we get a word error.
dot_str = 'farm barn.'
counts = ec.CountWordErrors(ocr_text=dot_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=1, truth_count=2, test_count=2))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=dot_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=1, truth_count=2, test_count=2))
# Space is special.
no_space = 'farmbarn'
counts = ec.CountWordErrors(ocr_text=no_space, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=1, truth_count=2, test_count=1))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=no_space)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=2, truth_count=1, test_count=2))
# Lose them all.
counts = ec.CountWordErrors(ocr_text='', truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=0, truth_count=2, test_count=0))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text='')
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=2, truth_count=0, test_count=2))
# With a space in ba rn, there is an extra add.
sp_str = 'farm ba rn'
counts = ec.CountWordErrors(ocr_text=sp_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=2, truth_count=2, test_count=3))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=sp_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=1, truth_count=3, test_count=2))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
tensorflow/datasets | tensorflow_datasets/text/tiny_shakespeare_test.py | 1 | 1291 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tiny Shakespeare dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import tiny_shakespeare
class TinyShakespeareTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = tiny_shakespeare.TinyShakespeare
SPLITS = {
"train": 1,
"validation": 1,
"test": 1,
}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}
if __name__ == "__main__":
testing.test_main()
| apache-2.0 |
aboyett/blockdiag | src/blockdiag/plugins/autoclass.py | 1 | 1130 | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from blockdiag import plugins
class AutoClass(plugins.NodeHandler):
def on_created(self, node):
if node.id is None:
return
for name, klass in self.diagram.classes.items():
pattern = "_%s$" % re.escape(name)
if re.search(pattern, node.id):
node.label = re.sub(pattern, '', node.id)
node.set_attributes(klass.attrs)
def setup(self, diagram, **kwargs):
plugins.install_node_handler(AutoClass(diagram, **kwargs))
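# Illustrative example (sketch, not from the blockdiag sources): with this
# plugin enabled, a node whose id ends in "_<classname>" picks up that class
# and has the suffix stripped from its label, e.g.
#
#   blockdiag {
#      plugin autoclass;
#      class emphasis [color = pink, style = dashed];
#      A_emphasis -> B;     // rendered with label "A" and the emphasis style
#   }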
| apache-2.0 |
thelac/crazyflie | win32install/generate_nsis.py | 18 | 1224 | import jinja2
import os
from subprocess import Popen, PIPE
DIST_PATH = "..\dist"
# Get list of files and directory to install/uninstall
INSTALL_FILES = []
INSTALL_DIRS = []
os.chdir(os.path.join(os.path.dirname(__file__), DIST_PATH))
for root, dirs, files in os.walk("."):
for f in files:
INSTALL_FILES += [os.path.join(root[2:], f)]
INSTALL_DIRS += [root[2:]]
print "Found {} files in {} folders to install.".format(len(INSTALL_FILES),
len(INSTALL_DIRS))
# Get git tag or VERSION
try:
process = Popen(["git", "describe", "--tags"], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
except OSError:
raise Exception("Cannot run git: Git is required to generate installer!")
VERSION = output.strip()
print "Cfclient version {}".format(VERSION)
os.chdir(os.path.dirname(__file__))
with open("cfclient.nsi.tmpl", "r") as template_file:
TEMPLATE = template_file.read()
TMPL = jinja2.Template(TEMPLATE)
with open("cfclient.nsi", "w") as out_file:
out_file.write(TMPL.render(files=INSTALL_FILES,
dirs=INSTALL_DIRS,
version=VERSION))
| gpl-2.0 |
waynenilsen/statsmodels | statsmodels/examples/ex_kde_confint.py | 34 | 1973 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:02:59 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5),dict(loc=1, scale=.5)))
x.sort() # not needed
kde = npar.KDEUnivariate(x)
kde.fit('gau')
ci = kde.kernel.density_confint(kde.density, len(x))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(x, bins=15, normed=True, alpha=0.25)
ax.plot(kde.support, kde.density, lw=2, color='red')
ax.fill_between(kde.support, ci[:,0], ci[:,1],
                color='grey', alpha=0.7)
ax.set_title('Kernel Density Gaussian (bw = %4.2f)' % kde.bw)
# use all kernels directly
x_grid = np.linspace(np.min(x), np.max(x), 51)
x_grid = np.linspace(-3, 3, 51)
kernel_names = ['Biweight', 'Cosine', 'Epanechnikov', 'Gaussian',
'Triangular', 'Triweight', #'Uniform',
]
fig = plt.figure()
for ii, kn in enumerate(kernel_names):
ax = fig.add_subplot(2, 3, ii+1) # without uniform
ax.hist(x, bins=10, normed=True, alpha=0.25)
    # reduce bandwidth for Gaussian and Uniform which are too large in this example
if kn in ['Gaussian', 'Uniform']:
args = (0.5,)
else:
args = ()
kernel = getattr(kernels, kn)(*args)
kde_grid = [kernel.density(x, xi) for xi in x_grid]
confint_grid = kernel.density_confint(kde_grid, len(x))
ax.plot(x_grid, kde_grid, lw=2, color='red', label=kn)
ax.fill_between(x_grid, confint_grid[:,0], confint_grid[:,1],
                    color='grey', alpha=0.7)
ax.legend(loc='upper left')
plt.show()
| bsd-3-clause |
uni2u/neutron | neutron/tests/unit/test_db_migration.py | 14 | 8272 | # Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from neutron.db import migration
from neutron.db.migration import cli
from neutron.tests import base
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_err.side_effect = SystemExit
def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs={}):
with mock.patch.object(sys, 'argv', argv):
cli.main()
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
('foo',),
{'sql': False}
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
('foo',),
{'sql': True}
)
def test_current(self):
self._main_test_helper(['prog', 'current'], 'current')
def test_history(self):
self._main_test_helper(['prog', 'history'], 'history')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_file') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
validate.assert_called_once_with(mock.ANY)
def test_database_sync_revision(self):
with mock.patch.object(cli, 'update_head_file') as update:
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': False, 'autogenerate': True}
)
update.assert_called_once_with(mock.ANY)
update.reset_mock()
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': True, 'autogenerate': False}
)
update.assert_called_once_with(mock.ANY)
def test_upgrade(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
('head',),
{'sql': True}
)
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
('+3',),
{'sql': False}
)
def test_downgrade(self):
self._main_test_helper(
['prog', 'downgrade', '--sql', 'folsom'],
'downgrade',
('folsom',),
{'sql': True}
)
self._main_test_helper(
['prog', 'downgrade', '--delta', '2'],
'downgrade',
('-2',),
{'sql': False}
)
def _test_validate_head_file_helper(self, heads, file_content=None):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
fc.return_value.get_current_head.return_value = heads[0]
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.read.return_value = file_content
with mock.patch('os.path.isfile') as is_file:
is_file.return_value = file_content is not None
if file_content in heads:
cli.validate_head_file(mock.sentinel.config)
else:
self.assertRaises(
SystemExit,
cli.validate_head_file,
mock.sentinel.config
)
self.mock_alembic_err.assert_called_once_with(mock.ANY)
fc.assert_called_once_with(mock.sentinel.config)
def test_validate_head_file_multiple_heads(self):
self._test_validate_head_file_helper(['a', 'b'])
def test_validate_head_file_missing_file(self):
self._test_validate_head_file_helper(['a'])
def test_validate_head_file_wrong_contents(self):
self._test_validate_head_file_helper(['a'], 'b')
def test_validate_head_success(self):
self._test_validate_head_file_helper(['a'], 'a')
def test_update_head_file_multiple_heads(self):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = ['a', 'b']
self.assertRaises(
SystemExit,
cli.update_head_file,
mock.sentinel.config
)
self.mock_alembic_err.assert_called_once_with(mock.ANY)
fc.assert_called_once_with(mock.sentinel.config)
def test_update_head_file_success(self):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = ['a']
fc.return_value.get_current_head.return_value = 'a'
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
cli.update_head_file(mock.sentinel.config)
mock_open.return_value.write.assert_called_once_with('a')
fc.assert_called_once_with(mock.sentinel.config)
| apache-2.0 |
da-anda/xbmc | lib/libUPnP/Platinum/Build/Tools/SCons/gcc-generic.py | 283 | 1317 | import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None, gcc_extra_options=''):
if gcc_stop_on_warning == None: gcc_stop_on_warning = env['stop_on_warning']
### compiler flags
if gcc_strict:
env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations'])
else:
env.AppendUnique(CCFLAGS = ['-Wall'])
compiler_defines = ['-D_REENTRANT']
env.AppendUnique(CCFLAGS = compiler_defines)
env.AppendUnique(CPPFLAGS = compiler_defines)
if env['build_config'] == 'Debug':
env.AppendUnique(CCFLAGS = '-g')
else:
env.AppendUnique(CCFLAGS = '-O3')
if gcc_stop_on_warning:
env.AppendUnique(CCFLAGS = ['-Werror'])
env['STRIP'] = 'strip'
if gcc_cross_prefix:
env['ENV']['PATH'] += os.environ['PATH']
env['AR'] = gcc_cross_prefix+'-ar'
env['RANLIB'] = gcc_cross_prefix+'-ranlib'
env['CC'] = gcc_cross_prefix+'-gcc ' + gcc_extra_options
env['CXX'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['LINK'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['STRIP'] = gcc_cross_prefix+'-strip'
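# Usage sketch (assumption, not from the original build tree): with this file
# on the SCons toolpath, an SConstruct could load it roughly like
#
#   env = Environment(tools=['gcc-generic'], toolpath=['Build/Tools/SCons'],
#                     build_config='Debug', stop_on_warning=True)
#
# generate() above then appends the warning/optimisation flags and, when a
# gcc_cross_prefix is given, points CC/CXX/LINK at the cross toolchain.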
| gpl-2.0 |
Pistachitos/Sick-Beard | lib/imdb/utils.py | 50 | 60601 | """
utils module (imdb package).
This module provides basic utilities for the imdb package.
Copyright 2004-2012 Davide Alberani <[email protected]>
2009 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import generators
import re
import string
import logging
from copy import copy, deepcopy
from time import strptime, strftime
from imdb import VERSION
from imdb import linguistics
from imdb._exceptions import IMDbParserError
# Logger for imdb.utils module.
_utils_logger = logging.getLogger('imdbpy.utils')
# The regular expression for the "long" year format of IMDb, like
# "(1998)" and "(1986/II)", where the optional roman number (that I call
# "imdbIndex" after the slash is used for movies with the same title
# and year of release.
# XXX: probably L, C, D and M are far too much! ;-)
re_year_index = re.compile(r'\(([0-9\?]{4}(/[IVXLCDM]+)?)\)')
re_extended_year_index = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?((?:[0-9\?]{4})(?:-[0-9\?]{4})?)(?:/([IVXLCDM]+)?)?\)')
re_remove_kind = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?')
# Match only the imdbIndex (for name strings).
re_index = re.compile(r'^\(([IVXLCDM]+)\)$')
# Match things inside parentheses.
re_parentheses = re.compile(r'(\(.*\))')
# Match the number of episodes.
re_episodes = re.compile('\s?\((\d+) episodes\)', re.I)
re_episode_info = re.compile(r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}')
# Common suffixes in surnames.
_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van',
'e', 'von', 'the', 'di', 'du', 'el', 'al')
def canonicalName(name):
"""Return the given name in canonical "Surname, Name" format.
It assumes that name is in the 'Name Surname' format."""
# XXX: some statistics (as of 17 Apr 2008, over 2288622 names):
# - just a surname: 69476
# - single surname, single name: 2209656
# - composed surname, composed name: 9490
# - composed surname, single name: 67606
# (2: 59764, 3: 6862, 4: 728)
# - single surname, composed name: 242310
# (2: 229467, 3: 9901, 4: 2041, 5: 630)
# - Jr.: 8025
# Don't convert names already in the canonical format.
if name.find(', ') != -1: return name
if isinstance(name, unicode):
joiner = u'%s, %s'
sur_joiner = u'%s %s'
sur_space = u' %s'
space = u' '
else:
joiner = '%s, %s'
sur_joiner = '%s %s'
sur_space = ' %s'
space = ' '
sname = name.split(' ')
snl = len(sname)
if snl == 2:
# Just a name and a surname: how boring...
name = joiner % (sname[1], sname[0])
elif snl > 2:
lsname = [x.lower() for x in sname]
if snl == 3: _indexes = (0, snl-2)
else: _indexes = (0, snl-2, snl-3)
# Check for common surname prefixes at the beginning and near the end.
for index in _indexes:
if lsname[index] not in _sname_suffixes: continue
try:
# Build the surname.
surn = sur_joiner % (sname[index], sname[index+1])
del sname[index]
del sname[index]
try:
# Handle the "Jr." after the name.
if lsname[index+2].startswith('jr'):
surn += sur_space % sname[index]
del sname[index]
except (IndexError, ValueError):
pass
name = joiner % (surn, space.join(sname))
break
except ValueError:
continue
else:
name = joiner % (sname[-1], space.join(sname[:-1]))
return name
def normalizeName(name):
"""Return a name in the normal "Name Surname" format."""
if isinstance(name, unicode):
joiner = u'%s %s'
else:
joiner = '%s %s'
sname = name.split(', ')
if len(sname) == 2:
name = joiner % (sname[1], sname[0])
return name
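# Illustrative examples (sketch only, relying solely on the two functions above):
#
#   >>> canonicalName('Jean-Claude Van Damme')
#   'Van Damme, Jean-Claude'
#   >>> normalizeName('Van Damme, Jean-Claude')
#   'Jean-Claude Van Damme'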
def analyze_name(name, canonical=None):
"""Return a dictionary with the name and the optional imdbIndex
keys, from the given string.
If canonical is None (default), the name is stored in its own style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
raise an IMDbParserError exception if the name is not valid.
"""
original_n = name
name = name.strip()
res = {}
imdbIndex = ''
opi = name.rfind('(')
cpi = name.rfind(')')
# Strip notes (but not if the name starts with a parenthesis).
if opi not in (-1, 0) and cpi > opi:
if re_index.match(name[opi:cpi+1]):
imdbIndex = name[opi+1:cpi]
name = name[:opi].rstrip()
else:
# XXX: for the birth and death dates case like " (1926-2004)"
name = re_parentheses.sub('', name).strip()
if not name:
raise IMDbParserError('invalid name: "%s"' % original_n)
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
res['name'] = name
if imdbIndex:
res['imdbIndex'] = imdbIndex
return res
def build_name(name_dict, canonical=None):
"""Given a dictionary that represents a "long" IMDb name,
return a string.
If canonical is None (default), the name is returned in the stored style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
"""
name = name_dict.get('canonical name') or name_dict.get('name', '')
if not name: return ''
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
imdbIndex = name_dict.get('imdbIndex')
if imdbIndex:
name += ' (%s)' % imdbIndex
return name
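# Illustrative usage (added, hedged): analyze_name() and build_name() are
# intended to be inverses of each other for simple inputs; the exact reprs
# shown here are an expectation, not output copied from the project:
#   >>> analyze_name(u'Laurence Olivier (I)')
#   {'name': u'Laurence Olivier', 'imdbIndex': u'I'}
#   >>> build_name({'name': u'Laurence Olivier', 'imdbIndex': u'I'})
#   u'Laurence Olivier (I)'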
# XXX: here only for backward compatibility. Find and remove any dependency.
_articles = linguistics.GENERIC_ARTICLES
_unicodeArticles = linguistics.toUnicode(_articles)
articlesDicts = linguistics.articlesDictsForLang(None)
spArticles = linguistics.spArticlesForLang(None)
def canonicalTitle(title, lang=None):
"""Return the title in the canonic format 'Movie Title, The';
beware that it doesn't handle long imdb titles, but only the
title portion, without year[/imdbIndex] or special markup.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
articlesDicts = linguistics.articlesDictsForLang(lang)
try:
if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
return title
except IndexError:
pass
if isUnicode:
_format = u'%s, %s'
else:
_format = '%s, %s'
ltitle = title.lower()
spArticles = linguistics.spArticlesForLang(lang)
for article in spArticles[isUnicode]:
if ltitle.startswith(article):
lart = len(article)
title = _format % (title[lart:], title[:lart])
if article[-1] == ' ':
title = title[:-1]
break
## XXX: an attempt using a dictionary lookup.
##for artSeparator in (' ', "'", '-'):
## article = _articlesDict.get(ltitle.split(artSeparator)[0])
## if article is not None:
## lart = len(article)
## # check titles like "una", "I'm Mad" and "L'abbacchio".
## if title[lart:] == '' or (artSeparator != ' ' and
## title[lart:][1] != artSeparator): continue
## title = '%s, %s' % (title[lart:], title[:lart])
## if artSeparator == ' ': title = title[1:]
## break
return title
def normalizeTitle(title, lang=None):
"""Return the title in the normal "The Title" format;
beware that it doesn't handle long imdb titles, but only the
title portion, without year[/imdbIndex] or special markup.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
stitle = title.split(', ')
articlesDicts = linguistics.articlesDictsForLang(lang)
if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]:
sep = ' '
if stitle[-1][-1] in ("'", '-'):
sep = ''
if isUnicode:
_format = u'%s%s%s'
_joiner = u', '
else:
_format = '%s%s%s'
_joiner = ', '
title = _format % (stitle[-1], sep, _joiner.join(stitle[:-1]))
return title
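# Illustrative usage (added, hedged): assuming the default article lists in
# the linguistics module include the English article "the", the conversion
# between canonical and normal title form is expected to look like this:
#   >>> canonicalTitle(u'The Untouchables')
#   u'Untouchables, The'
#   >>> normalizeTitle(u'Untouchables, The')
#   u'The Untouchables'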
def _split_series_episode(title):
"""Return the series and the episode titles; if this is not a
series' episode, the returned series title is empty.
This function recognizes two different styles:
"The Series" An Episode (2005)
"The Series" (2004) {An Episode (2005) (#season.episode)}"""
series_title = ''
episode_or_year = ''
if title[-1:] == '}':
# Title of the episode, as in the plain text data files.
begin_eps = title.rfind('{')
if begin_eps == -1: return '', ''
series_title = title[:begin_eps].rstrip()
# episode_or_year is returned with the {...}
episode_or_year = title[begin_eps:].strip()
if episode_or_year[:12] == '{SUSPENDED}}': return '', ''
# XXX: works only with tv series; it's still unclear whether
# IMDb will support episodes for tv mini series and tv movies...
elif title[0:1] == '"':
second_quot = title[1:].find('"') + 2
if second_quot != 1: # a second " was found.
episode_or_year = title[second_quot:].lstrip()
first_char = episode_or_year[0:1]
if not first_char: return '', ''
if first_char != '(':
# There is no (year) here, but rather the title of the episode;
# that means this is an episode title, as returned by
# the web server.
series_title = title[:second_quot]
##elif episode_or_year[-1:] == '}':
## # Title of the episode, as in the plain text data files.
## begin_eps = episode_or_year.find('{')
## if begin_eps == -1: return series_title, episode_or_year
## series_title = title[:second_quot+begin_eps].rstrip()
## # episode_or_year is returned with the {...}
## episode_or_year = episode_or_year[begin_eps:]
return series_title, episode_or_year
def is_series_episode(title):
"""Return True if 'title' is an series episode."""
title = title.strip()
if _split_series_episode(title)[0]: return 1
return 0
def analyze_title(title, canonical=None, canonicalSeries=None,
canonicalEpisode=None, _emptyString=u''):
"""Analyze the given title and return a dictionary with the
"stripped" title, the kind of the show ("movie", "tv series", etc.),
the year of production and the optional imdbIndex (a roman number
used to distinguish between movies with the same title and year).
If canonical is None (default), the title is stored in its own style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
Raise an IMDbParserError exception if the title is not valid.
"""
# XXX: introduce the 'lang' argument?
if canonical is not None:
canonicalSeries = canonicalEpisode = canonical
original_t = title
result = {}
title = title.strip()
year = _emptyString
kind = _emptyString
imdbIndex = _emptyString
series_title, episode_or_year = _split_series_episode(title)
if series_title:
# It's an episode of a series.
series_d = analyze_title(series_title, canonical=canonicalSeries)
oad = sen = ep_year = _emptyString
# Plain text data files format.
if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}':
match = re_episode_info.findall(episode_or_year)
if match:
# Episode title, original air date and #season.episode
episode_or_year, oad, sen = match[0]
episode_or_year = episode_or_year.strip()
if not oad:
# No year, but the title is something like (2005-04-12)
if episode_or_year and episode_or_year[0] == '(' and \
episode_or_year[-1:] == ')' and \
episode_or_year[1:2] != '#':
oad = episode_or_year
if oad[1:5] and oad[5:6] == '-':
try:
ep_year = int(oad[1:5])
except (TypeError, ValueError):
pass
if not oad and not sen and episode_or_year.startswith('(#'):
sen = episode_or_year
elif episode_or_year.startswith('Episode dated'):
oad = episode_or_year[14:]
if oad[-4:].isdigit():
try:
ep_year = int(oad[-4:])
except (TypeError, ValueError):
pass
episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
episode_d['kind'] = u'episode'
episode_d['episode of'] = series_d
if oad:
episode_d['original air date'] = oad[1:-1]
if ep_year and episode_d.get('year') is None:
episode_d['year'] = ep_year
if sen and sen[2:-1].find('.') != -1:
seas, epn = sen[2:-1].split('.')
if seas:
# Set season and episode.
try: seas = int(seas)
except: pass
try: epn = int(epn)
except: pass
episode_d['season'] = seas
if epn:
episode_d['episode'] = epn
return episode_d
# First of all, search for the kind of show.
# XXX: Number of entries at 17 Apr 2008:
# movie: 379,871
# episode: 483,832
# tv movie: 61,119
# tv series: 44,795
# video movie: 57,915
# tv mini series: 5,497
# video game: 5,490
# More up-to-date statistics: http://us.imdb.com/database_statistics
if title.endswith('(TV)'):
kind = u'tv movie'
title = title[:-4].rstrip()
elif title.endswith('(V)'):
kind = u'video movie'
title = title[:-3].rstrip()
elif title.endswith('(video)'):
kind = u'video movie'
title = title[:-7].rstrip()
elif title.endswith('(mini)'):
kind = u'tv mini series'
title = title[:-6].rstrip()
elif title.endswith('(VG)'):
kind = u'video game'
title = title[:-4].rstrip()
# Search for the year and the optional imdbIndex (a roman number).
yi = re_year_index.findall(title)
if not yi:
yi = re_extended_year_index.findall(title)
if yi:
yk, yiy, yii = yi[-1]
yi = [(yiy, yii)]
if yk == 'TV episode':
kind = u'episode'
elif yk == 'TV':
kind = u'tv movie'
elif yk == 'TV Series':
kind = u'tv series'
elif yk == 'Video':
kind = u'video movie'
elif yk == 'TV mini-series':
kind = u'tv mini series'
elif yk == 'Video Game':
kind = u'video game'
title = re_remove_kind.sub('(', title)
if yi:
last_yi = yi[-1]
year = last_yi[0]
if last_yi[1]:
imdbIndex = last_yi[1][1:]
year = year[:-len(imdbIndex)-1]
i = title.rfind('(%s)' % last_yi[0])
if i != -1:
title = title[:i-1].rstrip()
# This is a tv (mini) series: strip the '"' at the begin and at the end.
# XXX: strip('"') is not used for compatibility with Python 2.0.
if title and title[0] == title[-1] == '"':
if not kind:
kind = u'tv series'
title = title[1:-1].strip()
elif title.endswith('(TV series)'):
kind = u'tv series'
title = title[:-11].rstrip()
if not title:
raise IMDbParserError('invalid title: "%s"' % original_t)
if canonical is not None:
if canonical:
title = canonicalTitle(title)
else:
title = normalizeTitle(title)
# 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series',
# 'tv movie', 'video movie', 'video game')
result['title'] = title
result['kind'] = kind or u'movie'
if year and year != '????':
if '-' in year:
result['series years'] = year
year = year[:4]
try:
result['year'] = int(year)
except (TypeError, ValueError):
pass
if imdbIndex:
result['imdbIndex'] = imdbIndex
if isinstance(_emptyString, str):
result['kind'] = str(kind or 'movie')
return result
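# Illustrative usage (added, hedged): a sketch of the dictionaries that
# analyze_title() is expected to return for a plain movie title and for a
# quoted tv series title (key order and exact values are a best guess):
#   >>> analyze_title(u'Manhattan (1979)')
#   {'title': u'Manhattan', 'kind': u'movie', 'year': 1979}
#   >>> analyze_title(u'"The Simpsons" (1989)')
#   {'title': u'The Simpsons', 'kind': u'tv series', 'year': 1989}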
_web_format = '%d %B %Y'
_ptdf_format = '(%Y-%m-%d)'
def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
"""Convert a time expressed in the pain text data files, to
the 'Episode dated ...' format used on the web site; if
fromPTDFtoWEB is false, the inverted conversion is applied."""
try:
if fromPTDFtoWEB:
from_format = _ptdf_format
to_format = _web_format
else:
from_format = u'Episode dated %s' % _web_format
to_format = _ptdf_format
t = strptime(title, from_format)
title = strftime(to_format, t)
if fromPTDFtoWEB:
if title[0] == '0': title = title[1:]
title = u'Episode dated %s' % title
except ValueError:
pass
if isinstance(_emptyString, str):
try:
title = str(title)
except UnicodeDecodeError:
pass
return title
def build_title(title_dict, canonical=None, canonicalSeries=None,
canonicalEpisode=None, ptdf=0, lang=None, _doYear=1,
_emptyString=u''):
"""Given a dictionary that represents a "long" IMDb title,
return a string.
If canonical is None (default), the title is returned in the stored style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
lang can be used to specify the language of the title.
If ptdf is true, the plain text data files format is used.
"""
if canonical is not None:
canonicalSeries = canonical
pre_title = _emptyString
kind = title_dict.get('kind')
episode_of = title_dict.get('episode of')
if kind == 'episode' and episode_of is not None:
# Works with both Movie instances and plain dictionaries.
doYear = 0
if ptdf:
doYear = 1
pre_title = build_title(episode_of, canonical=canonicalSeries,
ptdf=0, _doYear=doYear,
_emptyString=_emptyString)
ep_dict = {'title': title_dict.get('title', ''),
'imdbIndex': title_dict.get('imdbIndex')}
ep_title = ep_dict['title']
if not ptdf:
doYear = 1
ep_dict['year'] = title_dict.get('year', '????')
if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \
ep_title[1:5].isdigit():
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=1,
_emptyString=_emptyString)
else:
doYear = 0
if ep_title.startswith('Episode dated'):
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=0,
_emptyString=_emptyString)
episode_title = build_title(ep_dict,
canonical=canonicalEpisode, ptdf=ptdf,
_doYear=doYear, _emptyString=_emptyString)
if ptdf:
oad = title_dict.get('original air date', _emptyString)
if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \
episode_title.find(oad) == -1:
episode_title += ' (%s)' % oad
seas = title_dict.get('season')
if seas is not None:
episode_title += ' (#%s' % seas
episode = title_dict.get('episode')
if episode is not None:
episode_title += '.%s' % episode
episode_title += ')'
episode_title = '{%s}' % episode_title
return '%s %s' % (pre_title, episode_title)
title = title_dict.get('title', '')
if not title: return _emptyString
if canonical is not None:
if canonical:
title = canonicalTitle(title, lang=lang)
else:
title = normalizeTitle(title, lang=lang)
if pre_title:
title = '%s %s' % (pre_title, title)
if kind in (u'tv series', u'tv mini series'):
title = '"%s"' % title
if _doYear:
imdbIndex = title_dict.get('imdbIndex')
year = title_dict.get('year') or u'????'
if isinstance(_emptyString, str):
year = str(year)
title += ' (%s' % year
if imdbIndex:
title += '/%s' % imdbIndex
title += ')'
if kind:
if kind == 'tv movie':
title += ' (TV)'
elif kind == 'video movie':
title += ' (V)'
elif kind == 'tv mini series':
title += ' (mini)'
elif kind == 'video game':
title += ' (VG)'
return title
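# Illustrative usage (added, hedged): build_title() is roughly the inverse
# of analyze_title(); for a plain movie dictionary the expected output is:
#   >>> build_title({'title': u'Manhattan', 'year': 1979, 'kind': u'movie'})
#   u'Manhattan (1979)'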
def split_company_name_notes(name):
"""Return two strings, the first representing the company name,
and the other representing the (optional) notes."""
name = name.strip()
notes = u''
if name.endswith(')'):
fpidx = name.find('(')
if fpidx != -1:
notes = name[fpidx:]
name = name[:fpidx].rstrip()
return name, notes
def analyze_company_name(name, stripNotes=False):
"""Return a dictionary with the name and the optional 'country'
keys, from the given string.
If stripNotes is true, tries to not consider optional notes.
Raise an IMDbParserError exception if the name is not valid.
"""
if stripNotes:
name = split_company_name_notes(name)[0]
o_name = name
name = name.strip()
country = None
if name.endswith(']'):
idx = name.rfind('[')
if idx != -1:
country = name[idx:]
name = name[:idx].rstrip()
if not name:
raise IMDbParserError('invalid name: "%s"' % o_name)
result = {'name': name}
if country:
result['country'] = country
return result
def build_company_name(name_dict, _emptyString=u''):
"""Given a dictionary that represents a "long" IMDb company name,
return a string.
"""
name = name_dict.get('name')
if not name:
return _emptyString
country = name_dict.get('country')
if country is not None:
name += ' %s' % country
return name
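# Illustrative usage (added, hedged; company name and country tag are made
# up for the example):
#   >>> analyze_company_name(u'Warner Bros. [us]')
#   {'name': u'Warner Bros.', 'country': u'[us]'}
#   >>> build_company_name({'name': u'Warner Bros.', 'country': u'[us]'})
#   u'Warner Bros. [us]'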
class _LastC:
"""Size matters."""
def __cmp__(self, other):
if isinstance(other, self.__class__): return 0
return 1
_last = _LastC()
def cmpMovies(m1, m2):
"""Compare two movies by year, in reverse order; the imdbIndex is checked
for movies with the same year of production and title."""
# Sort tv series' episodes.
m1e = m1.get('episode of')
m2e = m2.get('episode of')
if m1e is not None and m2e is not None:
cmp_series = cmpMovies(m1e, m2e)
if cmp_series != 0:
return cmp_series
m1s = m1.get('season')
m2s = m2.get('season')
if m1s is not None and m2s is not None:
if m1s < m2s:
return 1
elif m1s > m2s:
return -1
m1p = m1.get('episode')
m2p = m2.get('episode')
if m1p < m2p:
return 1
elif m1p > m2p:
return -1
try:
if m1e is None: m1y = int(m1.get('year', 0))
else: m1y = int(m1e.get('year', 0))
except ValueError:
m1y = 0
try:
if m2e is None: m2y = int(m2.get('year', 0))
else: m2y = int(m2e.get('year', 0))
except ValueError:
m2y = 0
if m1y > m2y: return -1
if m1y < m2y: return 1
# Ok, these movies have the same production year...
#m1t = m1.get('canonical title', _last)
#m2t = m2.get('canonical title', _last)
# It should also work with normal dictionaries (returned from searches).
#if m1t is _last and m2t is _last:
m1t = m1.get('title', _last)
m2t = m2.get('title', _last)
if m1t < m2t: return -1
if m1t > m2t: return 1
# Ok, these movies have the same title...
m1i = m1.get('imdbIndex', _last)
m2i = m2.get('imdbIndex', _last)
if m1i > m2i: return -1
if m1i < m2i: return 1
m1id = getattr(m1, 'movieID', None)
# Introduce this check even for other comparisons functions?
# XXX: is it safe to check without knowing the data access system?
# probably not a great idea. Check for 'kind', instead?
if m1id is not None:
m2id = getattr(m2, 'movieID', None)
if m1id > m2id: return -1
elif m1id < m2id: return 1
return 0
def cmpPeople(p1, p2):
"""Compare two people by billingPos, name and imdbIndex."""
p1b = getattr(p1, 'billingPos', None) or _last
p2b = getattr(p2, 'billingPos', None) or _last
if p1b > p2b: return 1
if p1b < p2b: return -1
p1n = p1.get('canonical name', _last)
p2n = p2.get('canonical name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('imdbIndex', _last)
p2i = p2.get('imdbIndex', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
def cmpCompanies(p1, p2):
"""Compare two companies."""
p1n = p1.get('long imdb name', _last)
p2n = p2.get('long imdb name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('country', _last)
p2i = p2.get('country', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
# References to titles, names and characters.
# XXX: find better regexp!
re_titleRef = re.compile(r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)')
# FIXME: doesn't match persons with ' in the name.
re_nameRef = re.compile(r"'([^']+?)' \(qv\)")
# XXX: good choice? Are there characters with # in the name?
re_characterRef = re.compile(r"#([^']+?)# \(qv\)")
# Functions used to filter the text strings.
def modNull(s, titlesRefs, namesRefs, charactersRefs):
"""Do nothing."""
return s
def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles references."""
return re_titleRef.sub(r'\1', s)
def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove names references."""
return re_nameRef.sub(r'\1', s)
def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove characters references"""
return re_characterRef.sub(r'\1', s)
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles, names and characters references."""
s = modClearTitleRefs(s, {}, {}, {})
s = modClearCharacterRefs(s, {}, {}, {})
return modClearNameRefs(s, {}, {}, {})
def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Modify a string (or string values in a dictionary or strings
in a list), using the provided modFunct function and titlesRefs
namesRefs and charactersRefs references dictionaries."""
# Notice that it doesn't go any deeper than the first two levels in a list.
if isinstance(o, (unicode, str)):
return modFunct(o, titlesRefs, namesRefs, charactersRefs)
elif isinstance(o, (list, tuple, dict)):
_stillorig = 1
if isinstance(o, (list, tuple)): keys = xrange(len(o))
else: keys = o.keys()
for i in keys:
v = o[i]
if isinstance(v, (unicode, str)):
if _stillorig:
o = copy(o)
_stillorig = 0
o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs)
elif isinstance(v, (list, tuple)):
modifyStrings(o[i], modFunct, titlesRefs, namesRefs,
charactersRefs)
return o
def date_and_notes(s):
"""Parse (birth|death) date and notes; returns a tuple in the
form (date, notes)."""
s = s.strip()
if not s: return (u'', u'')
notes = u''
if s[0].isdigit() or s.split()[0].lower() in ('c.', 'january', 'february',
'march', 'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november',
'december', 'ca.', 'circa',
'????,'):
i = s.find(',')
if i != -1:
notes = s[i+1:].strip()
s = s[:i]
else:
notes = s
s = u''
if s == '????': s = u''
return s, notes
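# Illustrative usage (added, hedged): date_and_notes() splits a raw
# birth/death string into a (date, notes) tuple; expected behaviour:
#   >>> date_and_notes(u'8 August 1931, London, England, UK')
#   (u'8 August 1931', u'London, England, UK')
#   >>> date_and_notes(u'brain cancer')
#   (u'', u'brain cancer')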
class RolesList(list):
"""A list of Person or Character instances, used for the currentRole
property."""
def __unicode__(self):
return u' / '.join([unicode(x) for x in self])
def __str__(self):
# FIXME: does it make sense at all? Returning a unicode object
# doesn't seem right in __str__.
return ' / '.join([unicode(x).encode('utf8') for x in self])
# Replace & with &amp;, but only if it's not already part of a charref.
#_re_amp = re.compile(r'(&)(?!\w+;)', re.I)
#_re_amp = re.compile(r'(?<=\W)&(?=[^a-zA-Z0-9_#])')
_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)')
def escape4xml(value):
"""Escape some chars that can't be present in a XML value."""
if isinstance(value, int):
value = str(value)
value = _re_amp.sub('&', value)
value = value.replace('"', '"').replace("'", ''')
value = value.replace('<', '<').replace('>', '>')
if isinstance(value, unicode):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
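# Illustrative usage (added, hedged): escape4xml() is expected to escape the
# usual XML metacharacters while leaving existing character references
# alone; for a unicode input the result is ascii-encoded, e.g. (sketch):
#   >>> escape4xml(u'Tom & Jerry <1940>')
#   'Tom &amp; Jerry &lt;1940&gt;'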
def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Return three lists - for movie titles, persons and characters names -
with two items tuples: the first item is the reference once escaped
by the user-provided modFunct function, the second is the same
reference un-escaped."""
mRefs = []
for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'),
(re_nameRef, u"'%s' (qv)"),
(re_characterRef, u'#%s# (qv)')]:
theseRefs = []
for theRef in refRe.findall(value):
# refTemplate % theRef values don't change for a single
# _Container instance, so this is a good candidate for a
# cache or something - even if it's so rarely used that...
# Moreover, it can grow - ia.update(...) - and change if
# modFunct is modified.
goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs,
charactersRefs)
# Prevents problems with crap in plain text data files.
# We should probably exclude invalid chars and string that
# are too long in the re_*Ref expressions.
if '_' in goodValue or len(goodValue) > 128:
continue
toReplace = escape4xml(goodValue)
# Only the 'value' portion is replaced.
replaceWith = goodValue.replace(theRef, escape4xml(theRef))
theseRefs.append((toReplace, replaceWith))
mRefs.append(theseRefs)
return mRefs
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
namesRefs=None, charactersRefs=None):
"""Replace some chars that can't be present in a XML text."""
# XXX: use s.encode(encoding, 'xmlcharrefreplace') ? Probably not
# a great idea: after all, returning a unicode is safe.
if isinstance(value, (unicode, str)):
if not withRefs:
value = _handleTextNotes(escape4xml(value))
else:
# Replace references that were accidentally escaped.
replaceLists = _refsToReplace(value, modFunct, titlesRefs,
namesRefs, charactersRefs)
value = modFunct(value, titlesRefs or {}, namesRefs or {},
charactersRefs or {})
value = _handleTextNotes(escape4xml(value))
for replaceList in replaceLists:
for toReplace, replaceWith in replaceList:
value = value.replace(toReplace, replaceWith)
else:
value = unicode(value)
return value
def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
"""Build a tag for the given _Container instance;
both open and close tags are returned."""
tag = ton.__class__.__name__.lower()
what = 'name'
if tag == 'movie':
value = ton.get('long imdb title') or ton.get('title', '')
what = 'title'
else:
value = ton.get('long imdb name') or ton.get('name', '')
value = _normalizeValue(value)
extras = u''
crl = ton.currentRole
if crl:
if not isinstance(crl, list):
crl = [crl]
for cr in crl:
crTag = cr.__class__.__name__.lower()
crValue = cr['long imdb name']
crValue = _normalizeValue(crValue)
crID = cr.getID()
if crID is not None:
extras += u'<current-role><%s id="%s">' \
u'<name>%s</name></%s>' % (crTag, crID,
crValue, crTag)
else:
extras += u'<current-role><%s><name>%s</name></%s>' % \
(crTag, crValue, crTag)
if cr.notes:
extras += u'<notes>%s</notes>' % _normalizeValue(cr.notes)
extras += u'</current-role>'
theID = ton.getID()
if theID is not None:
beginTag = u'<%s id="%s"' % (tag, theID)
if addAccessSystem and ton.accessSystem:
beginTag += ' access-system="%s"' % ton.accessSystem
if not _containerOnly:
beginTag += u'><%s>%s</%s>' % (what, value, what)
else:
beginTag += u'>'
else:
if not _containerOnly:
beginTag = u'<%s><%s>%s</%s>' % (tag, what, value, what)
else:
beginTag = u'<%s>' % tag
beginTag += extras
if ton.notes:
beginTag += u'<notes>%s</notes>' % _normalizeValue(ton.notes)
return (beginTag, u'</%s>' % tag)
TAGS_TO_MODIFY = {
'movie.parents-guide': ('item', True),
'movie.number-of-votes': ('item', True),
'movie.soundtrack.item': ('item', True),
'movie.quotes': ('quote', False),
'movie.quotes.quote': ('line', False),
'movie.demographic': ('item', True),
'movie.episodes': ('season', True),
'movie.episodes.season': ('episode', True),
'person.merchandising-links': ('item', True),
'person.genres': ('item', True),
'person.quotes': ('quote', False),
'person.keywords': ('item', True),
'character.quotes': ('item', True),
'character.quotes.item': ('quote', False),
'character.quotes.item.quote': ('line', False)
}
_allchars = string.maketrans('', '')
_keepchars = _allchars.translate(_allchars, string.ascii_lowercase + '-' +
string.digits)
def _tagAttr(key, fullpath):
"""Return a tuple with a tag name and a (possibly empty) attribute,
applying the conversions specified in TAGS_TO_MODIFY and checking
that the tag is safe for an XML document."""
attrs = {}
_escapedKey = escape4xml(key)
if fullpath in TAGS_TO_MODIFY:
tagName, useTitle = TAGS_TO_MODIFY[fullpath]
if useTitle:
attrs['key'] = _escapedKey
elif not isinstance(key, unicode):
if isinstance(key, str):
tagName = unicode(key, 'ascii', 'ignore')
else:
strType = str(type(key)).replace("<type '", "").replace("'>", "")
attrs['keytype'] = strType
tagName = unicode(key)
else:
tagName = key
if isinstance(key, int):
attrs['keytype'] = 'int'
origTagName = tagName
tagName = tagName.lower().replace(' ', '-')
tagName = str(tagName).translate(_allchars, _keepchars)
if origTagName != tagName:
if 'key' not in attrs:
attrs['key'] = _escapedKey
if (not tagName) or tagName[0].isdigit() or tagName[0] == '-':
# This is a fail-safe: we should never be here, since unpredictable
# keys must be listed in TAGS_TO_MODIFY.
# This will probably break the DTD/schema, but at least it will
# produce a valid XML.
tagName = 'item'
_utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
attrs['key'] = _escapedKey
return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()])
def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
titlesRefs=None, namesRefs=None, charactersRefs=None,
_topLevel=True, key2infoset=None, fullpath=''):
"""Convert a sequence or a dictionary to a list of XML
unicode strings."""
if _l is None:
_l = []
if isinstance(seq, dict):
for key in seq:
value = seq[key]
if isinstance(key, _Container):
# Here we're assuming that a _Container is never a top-level
# key (otherwise we should handle key2infoset).
openTag, closeTag = _tag4TON(key)
# So that fullpath will contains something meaningful.
tagName = key.__class__.__name__.lower()
else:
tagName, attrs = _tagAttr(key, fullpath)
openTag = u'<%s' % tagName
if attrs:
openTag += ' %s' % attrs
if _topLevel and key2infoset and key in key2infoset:
openTag += u' infoset="%s"' % key2infoset[key]
if isinstance(value, int):
openTag += ' type="int"'
elif isinstance(value, float):
openTag += ' type="float"'
openTag += u'>'
closeTag = u'</%s>' % tagName
_l.append(openTag)
_seq2xml(value, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
elif isinstance(seq, (list, tuple)):
tagName, attrs = _tagAttr('item', fullpath)
beginTag = u'<%s' % tagName
if attrs:
beginTag += u' %s' % attrs
#beginTag += u'>'
closeTag = u'</%s>' % tagName
for item in seq:
if isinstance(item, _Container):
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath,
item.__class__.__name__.lower()))
else:
openTag = beginTag
if isinstance(item, int):
openTag += ' type="int"'
elif isinstance(item, float):
openTag += ' type="float"'
openTag += u'>'
_l.append(openTag)
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
else:
if isinstance(seq, _Container):
_l.extend(_tag4TON(seq))
else:
# Text, ints, floats and the like.
_l.append(_normalizeValue(seq, withRefs=withRefs,
modFunct=modFunct,
titlesRefs=titlesRefs,
namesRefs=namesRefs,
charactersRefs=charactersRefs))
return _l
_xmlHead = u"""<?xml version="1.0"?>
<!DOCTYPE %s SYSTEM "http://imdbpy.sf.net/dtd/imdbpy{VERSION}.dtd">
"""
_xmlHead = _xmlHead.replace('{VERSION}',
VERSION.replace('.', '').split('dev')[0][:2])
class _Container(object):
"""Base class for Movie, Person, Character and Company classes."""
# The default sets of information retrieved.
default_info = ()
# Aliases for some not-so-intuitive keys.
keys_alias = {}
# List of keys to modify.
keys_tomodify_list = ()
# Function used to compare two instances of this class.
cmpFunct = None
# Regular expression used to build the 'full-size (headshot|cover url)'.
_re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_')
def __init__(self, myID=None, data=None, notes=u'',
currentRole=u'', roleID=None, roleIsPerson=False,
accessSystem=None, titlesRefs=None, namesRefs=None,
charactersRefs=None, modFunct=None, *args, **kwds):
"""Initialize a Movie, Person, Character or Company object.
*myID* -- your personal identifier for this object.
*data* -- a dictionary used to initialize the object.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)' or the alias used in the
movie credits.
*accessSystem* -- a string representing the data access system used.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically built.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function applied to text fields before they are returned.
"""
self.reset()
self.accessSystem = accessSystem
self.myID = myID
if data is None: data = {}
self.set_data(data, override=1)
self.notes = notes
if titlesRefs is None: titlesRefs = {}
self.update_titlesRefs(titlesRefs)
if namesRefs is None: namesRefs = {}
self.update_namesRefs(namesRefs)
if charactersRefs is None: charactersRefs = {}
self.update_charactersRefs(charactersRefs)
self.set_mod_funct(modFunct)
self.keys_tomodify = {}
for item in self.keys_tomodify_list:
self.keys_tomodify[item] = None
self._roleIsPerson = roleIsPerson
if not roleIsPerson:
from imdb.Character import Character
self._roleClass = Character
else:
from imdb.Person import Person
self._roleClass = Person
self.currentRole = currentRole
if roleID:
self.roleID = roleID
self._init(*args, **kwds)
def _get_roleID(self):
"""Return the characterID or personID of the currentRole object."""
if not self.__role:
return None
if isinstance(self.__role, list):
return [x.getID() for x in self.__role]
return self.currentRole.getID()
def _set_roleID(self, roleID):
"""Set the characterID or personID of the currentRole object."""
if not self.__role:
# XXX: needed? Just ignore it? It's probably safer to
# ignore it, to prevent some bugs in the parsers.
#raise IMDbError,"Can't set ID of an empty Character/Person object."
pass
if not self._roleIsPerson:
if not isinstance(roleID, (list, tuple)):
self.currentRole.characterID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].characterID = item
else:
if not isinstance(roleID, (list, tuple)):
self.currentRole.personID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].personID = item
roleID = property(_get_roleID, _set_roleID,
doc="the characterID or personID of the currentRole object.")
def _get_currentRole(self):
"""Return a Character or Person instance."""
if self.__role:
return self.__role
return self._roleClass(name=u'', accessSystem=self.accessSystem,
modFunct=self.modFunct)
def _set_currentRole(self, role):
"""Set self.currentRole to a Character or Person instance."""
if isinstance(role, (unicode, str)):
if not role:
self.__role = None
else:
self.__role = self._roleClass(name=role, modFunct=self.modFunct,
accessSystem=self.accessSystem)
elif isinstance(role, (list, tuple)):
self.__role = RolesList()
for item in role:
if isinstance(item, (unicode, str)):
self.__role.append(self._roleClass(name=item,
accessSystem=self.accessSystem,
modFunct=self.modFunct))
else:
self.__role.append(item)
if not self.__role:
self.__role = None
else:
self.__role = role
currentRole = property(_get_currentRole, _set_currentRole,
doc="The role of a Person in a Movie" + \
" or the interpreter of a Character in a Movie.")
def _init(self, **kwds): pass
def reset(self):
"""Reset the object."""
self.data = {}
self.myID = None
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.modFunct = modClearRefs
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._reset()
def _reset(self): pass
def clear(self):
"""Reset the dictionary."""
self.data.clear()
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._clear()
def _clear(self): pass
def get_current_info(self):
"""Return the current set of information retrieved."""
return self.current_info
def update_infoset_map(self, infoset, keys, mainInfoset):
"""Update the mappings between infoset and keys."""
if keys is None:
keys = []
if mainInfoset is not None:
theIS = mainInfoset
else:
theIS = infoset
self.infoset2keys[theIS] = keys
for key in keys:
self.key2infoset[key] = theIS
def set_current_info(self, ci):
"""Set the current set of information retrieved."""
# XXX:Remove? It's never used and there's no way to update infoset2keys.
self.current_info = ci
def add_to_current_info(self, val, keys=None, mainInfoset=None):
"""Add a set of information to the current list."""
if val not in self.current_info:
self.current_info.append(val)
self.update_infoset_map(val, keys, mainInfoset)
def has_current_info(self, val):
"""Return true if the given set of information is in the list."""
return val in self.current_info
def set_mod_funct(self, modFunct):
"""Set the fuction used to modify the strings."""
if modFunct is None: modFunct = modClearRefs
self.modFunct = modFunct
def update_titlesRefs(self, titlesRefs):
"""Update the dictionary with the references to movies."""
self.titlesRefs.update(titlesRefs)
def get_titlesRefs(self):
"""Return the dictionary with the references to movies."""
return self.titlesRefs
def update_namesRefs(self, namesRefs):
"""Update the dictionary with the references to names."""
self.namesRefs.update(namesRefs)
def get_namesRefs(self):
"""Return the dictionary with the references to names."""
return self.namesRefs
def update_charactersRefs(self, charactersRefs):
"""Update the dictionary with the references to characters."""
self.charactersRefs.update(charactersRefs)
def get_charactersRefs(self):
"""Return the dictionary with the references to characters."""
return self.charactersRefs
def set_data(self, data, override=0):
"""Set the movie data to the given dictionary; if 'override' is
set, the previous data is removed, otherwise the two dictionary
are merged.
"""
if not override:
self.data.update(data)
else:
self.data = data
def getID(self):
"""Return movieID, personID, characterID or companyID."""
raise NotImplementedError('override this method')
def __cmp__(self, other):
"""Compare two Movie, Person, Character or Company objects."""
# XXX: raise an exception?
if self.cmpFunct is None: return -1
if not isinstance(other, self.__class__): return -1
return self.cmpFunct(other)
def __hash__(self):
"""Hash for this object."""
# XXX: does it always work correctly?
theID = self.getID()
if theID is not None and self.accessSystem not in ('UNKNOWN', None):
# Handle 'http' and 'mobile' as they are the same access system.
acs = self.accessSystem
if acs in ('mobile', 'httpThin'):
acs = 'http'
# There must be some indication of the kind of the object, too.
s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs)
else:
s4h = repr(self)
return hash(s4h)
def isSame(self, other):
"""Return True if the two represent the same object."""
if not isinstance(other, self.__class__): return 0
if hash(self) == hash(other): return 1
return 0
def __len__(self):
"""Number of items in the data dictionary."""
return len(self.data)
def getAsXML(self, key, _with_add_keys=True):
"""Return a XML representation of the specified key, or None
if empty. If _with_add_keys is False, dinamically generated
keys are excluded."""
# Prevent modifyStrings in __getitem__ to be called; if needed,
# it will be called by the _normalizeValue function.
origModFunct = self.modFunct
self.modFunct = modNull
# XXX: not totally sure it's a good idea, but could prevent
# problems (i.e.: the returned string always contains
# a DTD valid tag, and not something that can be only in
# the keys_alias map).
key = self.keys_alias.get(key, key)
if (not _with_add_keys) and (key in self._additional_keys()):
self.modFunct = origModFunct
return None
try:
withRefs = False
if key in self.keys_tomodify and \
origModFunct not in (None, modNull):
withRefs = True
value = self.get(key)
if value is None:
return None
tag = self.__class__.__name__.lower()
return u''.join(_seq2xml({key: value}, withRefs=withRefs,
modFunct=origModFunct,
titlesRefs=self.titlesRefs,
namesRefs=self.namesRefs,
charactersRefs=self.charactersRefs,
key2infoset=self.key2infoset,
fullpath=tag))
finally:
self.modFunct = origModFunct
def asXML(self, _with_add_keys=True):
"""Return a XML representation of the whole object.
If _with_add_keys is False, dinamically generated keys are excluded."""
beginTag, endTag = _tag4TON(self, addAccessSystem=True,
_containerOnly=True)
resList = [beginTag]
for key in self.keys():
value = self.getAsXML(key, _with_add_keys=_with_add_keys)
if not value:
continue
resList.append(value)
resList.append(endTag)
head = _xmlHead % self.__class__.__name__.lower()
return head + u''.join(resList)
def _getitem(self, key):
"""Handle special keys."""
return None
def __getitem__(self, key):
"""Return the value for a given key, checking key aliases;
a KeyError exception is raised if the key is not found.
"""
value = self._getitem(key)
if value is not None: return value
# Handle key aliases.
key = self.keys_alias.get(key, key)
rawData = self.data[key]
if key in self.keys_tomodify and \
self.modFunct not in (None, modNull):
try:
return modifyStrings(rawData, self.modFunct, self.titlesRefs,
self.namesRefs, self.charactersRefs)
except RuntimeError, e:
# Symbian/python 2.2 has a poor regexp implementation.
import warnings
warnings.warn('RuntimeError in '
"imdb.utils._Container.__getitem__; if it's not "
"a recursion limit exceeded and we're not running "
"in a Symbian environment, it's a bug:\n%s" % e)
return rawData
def __setitem__(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __delitem__(self, key):
"""Remove the given section or key."""
# XXX: how to remove an item of a section?
del self.data[key]
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
return []
def keys(self):
"""Return a list of valid keys."""
return self.data.keys() + self._additional_keys()
def items(self):
"""Return the items in the dictionary."""
return [(k, self.get(k)) for k in self.keys()]
# XXX: is this enough?
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self):
"""Return the values in the dictionary."""
return [self.get(k) for k in self.keys()]
def has_key(self, key):
"""Return true if a given section is defined."""
try:
self.__getitem__(key)
except KeyError:
return 0
return 1
# XXX: really useful???
# consider also that this will confuse people who meant to
# call ia.update(movieObject, 'data set') instead.
def update(self, dict):
self.data.update(dict)
def get(self, key, failobj=None):
"""Return the given section, or default if it's not found."""
try:
return self.__getitem__(key)
except KeyError:
return failobj
def setdefault(self, key, failobj=None):
if not self.has_key(key):
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __repr__(self):
"""String representation of an object."""
raise NotImplementedError('override this method')
def __str__(self):
"""Movie title or person name."""
raise NotImplementedError('override this method')
def __contains__(self, key):
raise NotImplementedError('override this method')
def append_item(self, key, item):
"""The item is appended to the list identified by the given key."""
self.data.setdefault(key, []).append(item)
def set_item(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __nonzero__(self):
"""Return true if self.data contains something."""
if self.data: return 1
return 0
def __deepcopy__(self, memo):
raise NotImplementedError('override this method')
def copy(self):
"""Return a deep copy of the object itself."""
return deepcopy(self)
def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0,
onlyKeysType=(_Container,), scalar=None):
"""Iterate over nested lists and dictionaries; toDescend is a list
or a tuple of types to be considered non-scalar; if yieldDictKeys is
true, also dictionaries' keys are yielded; if scalar is not None, only
items of the given type(s) are yielded."""
if scalar is None or isinstance(seq, scalar):
yield seq
if isinstance(seq, toDescend):
if isinstance(seq, (dict, _Container)):
if yieldDictKeys:
# Yield also the keys of the dictionary.
for key in seq.iterkeys():
for k in flatten(key, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
if onlyKeysType and isinstance(k, onlyKeysType):
yield k
for value in seq.itervalues():
for v in flatten(value, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield v
elif not isinstance(seq, (str, unicode, int, float)):
for item in seq:
for i in flatten(item, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield i
| gpl-3.0 |
Hellowlol/PyTunes | modules/newznab.py | 1 | 9273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import pytunes
import math
from pytunes.proxy import get_image
from urllib2 import urlopen, quote
from json import loads
import logging
class Newznab:
def __init__(self):
self.logger = logging.getLogger('modules.newznab')
pytunes.MODULES.append({
'name': 'Newznab Search',
'id': 'newznab',
'fields': [
{'type':'bool', 'label':'Enable', 'name':'newznab_enable'},
{'type':'text', 'label':'Menu name', 'name':'newznab_name', 'placeholder':''},
#{'type':'select',
# 'label':'Default NZB Client',
# 'name':'default_nzb_id',
# 'options':[],
# 'desc':'Only Enabled Clients Will Show'
#},
{'type':'text', 'label':'Console Category', 'name':'newznab_console', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Movies Category', 'name':'newznab_movies', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Audio Category', 'name':'newznab_audio', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'PC Category', 'name':'newznab_pc', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'TV Category', 'name':'newznab_tv', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'XXX Category', 'name':'newznab_xxx', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Books Category', 'name':'newznab_books', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Other Category', 'name':'newznab_other', 'desc':'From Sabnzbd Configuration'}
]})
pytunes.MODULES.append({
'name': 'Newznab Servers',
'id': 'newznab_update_server',
'action': '%ssettings/setnewzserver' % pytunes.WEBDIR,
#'test': pytunes.WEBDIR + 'newznab/ping',
'fields': [
{'type':'select',
'label':'Newznab Servers',
'name':'newznab_server_id',
'options':[
{'name':'New', 'value':0}
]},
{'type':'text',
'label':'Name',
'name':'newznab_server_name'},
{'type':'text', 'label':'Host', 'name':'newznab_server_host'},
{'type':'text', 'label':'Apikey', 'name':'newznab_server_apikey'},
{'type':'bool', 'label':'Use SSL', 'name':'newznab_server_ssl'}
]})
@cherrypy.expose()
def index(self, query='', **kwargs):
return pytunes.LOOKUP.get_template('newznab.html').render(query=query, scriptname='newznab')
"""
NOT IMPLEMENTED
@cherrypy.expose()
@cherrypy.tools.json_out()
def ping(self, newznab_host, newznab_apikey, **kwargs):
self.logger.debug("Pinging newznab-host")
return 1
"""
@cherrypy.expose()
def thumb(self, url, h=None, w=None, o=100):
if url.startswith('rageid'):
settings = pytunes.settings
host = settings.get('newznab_host', '').replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl', 0) else ''
url = 'http%s://%s/covers/tv/%s.jpg' % (ssl, host, url[6:])
return get_image(url, h, w, o)
@cherrypy.expose()
def getcategories(self, **kwargs):
self.logger.debug("Fetching available categories")
ret = ''
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = '' if self.current.ssl == '0' else 's'
apikey = self.current.apikey
url = 'http%s://%s/api?t=caps&o=xml' % (ssl, host)
self.logger.debug("Fetching Cat information from: %s" % url)
caps = urlopen(url, timeout=10).read()
lines = caps.split('\n')
opt_line = '<option value="%s">%s</option>'
for line in lines:
if 'category' in line and 'genre' not in line and not '/cat' in line:
junk,id,name = line.strip().split(' ')
id = id.split('"')[1]
main_name = name.split('"')[1]
ret += opt_line % (id, main_name)
if 'subcat' in line:
subcat = line.strip().split(' name')
id = subcat[0].split('"')[1]
name = '%s > %s' % (main_name, subcat[1].split('"')[1])
ret += opt_line % (id, name)
except:
self.logger.error('Unable to fetch categories from: %s' % url)
return ret
@cherrypy.expose()
def search(self, q='', cat='', **kwargs):
ret = ''
row = '<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>'
settings = pytunes.settings
sab_cat = {
'1000':settings.get('newznab_console', ''),
'2000':settings.get('newznab_movies', ''),
'3000':settings.get('newznab_audio', ''),
'4000':settings.get('newznab_pc', ''),
'5000':settings.get('newznab_tv', ''),
'6000':settings.get('newznab_xxx', ''),
'7000':settings.get('newznab_books', ''),
'8000':settings.get('newznab_other', '')
}
if cat:
cat = '&cat=%s' % cat
res = self.fetch('search&q=%s%s&extended=1' % (quote(q), cat))
#put in staticvars
link = "<a href='/newznab/AddNzbFromUrl?nzb_url=%s&nzb_category=%s' class='ajax-link' title='Download' cat='%s'><i class='icon-download-alt'></i></a>"
try:
results = res['channel']['item']
except:
results = res
grabs = '0'
for each in results:
files = str(each['attr'][4]['@attributes']['value'])
grabs = str(each['attr'][6]['@attributes']['value'])
category = each['category']
title = each['title']
cat = sab_cat[str(each['attr'][0]['@attributes']['value'])]
num = int(each['enclosure']['@attributes']['length'])
for x in [' bytes',' KB',' MB',' GB']:
if num < 1024.0:
size = "%3.2f%s" % (num, x)
break
num /= 1024.0
dl = link % (quote(each['link']), cat, cat)
ret += row % (title, category, size, files, grabs, dl)
return ret
@cherrypy.expose()
@cherrypy.tools.json_out()
def AddNzbFromUrl(self, nzb_url, nzb_category=''):
self.logger.debug("Adding nzb from url")
if nzb_category:
nzb_category = '&cat=%s' % nzb_category
return self.send('&mode=addurl&name=%s%s' % (quote(nzb_url), nzb_category))
def fetch(self, cmd):
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl') == 'on' else ''
apikey = self.current.apikey
url = 'http%s://%s/api?o=json&apikey=%s&t=%s' % (ssl, host, apikey, cmd)
self.logger.debug("Fetching information from: %s" % url)
return loads(urlopen(url, timeout=30).read())
except Exception, e:
self.logger.debug("Exception%s: " % str(e))
self.logger.error("Unable to fetch information from: newznab %s" % str(e))
def send(self, link):
try:
host = pytunes.settings.get('sabnzbd_host', '')
port = str(pytunes.settings.get('sabnzbd_port', ''))
apikey = pytunes.settings.get('sabnzbd_apikey', '')
sabnzbd_basepath = pytunes.settings.get('sabnzbd_basepath', '/sabnzbd/')
ssl = 's' if pytunes.settings.get('sabnzbd_ssl', 0) else ''
if(sabnzbd_basepath == ""):
sabnzbd_basepath = "/sabnzbd/"
if not(sabnzbd_basepath.endswith('/')):
sabnzbd_basepath += "/"
url = 'http%s://%s:%s%sapi?output=json&apikey=%s%s' % (ssl, host, port, sabnzbd_basepath, apikey, link)
self.logger.debug("Sending NZB to: %s: " % url)
return loads(urlopen(url, timeout=10).read())
except:
self.logger.error("Cannot contact sabnzbd")
return
#Future use...use staticvars
@cherrypy.expose()
def GetClients(self):
nzbclients = ''
if pytunes.settings.get('nzbget_enable', ''):
nzbclients += '<option id="nzbget">NZBget</option>'
if pytunes.settings.get('sabnzbd_enable', ''):
nzbclients += '<option id="sabnzbd">Sabnzbd+</option>'
if not nzbclients:
nzbclients = '<option>No Clients Enabled</option>'
return nzbclients
| gpl-3.0 |
3dfxmadscientist/cbss-server | addons/base_vat/__openerp__.py | 125 | 2928 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'VAT Number Validation',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
VAT validation for Partner's VAT numbers.
=========================================
After installing this module, values entered in the VAT field of Partners will
be validated for all supported countries. The country is inferred from the
2-letter country code that prefixes the VAT number, e.g. ``BE0477472701``
will be validated using the Belgian rules.
There are two different levels of VAT number validation:
--------------------------------------------------------
* By default, a simple off-line check is performed using the known validation
rules for the country, usually a simple check digit. This is quick and
always available, but allows numbers that are perhaps not truly allocated,
or not valid anymore.
* When the "VAT VIES Check" option is enabled (in the configuration of the user's
Company), VAT numbers will be instead submitted to the online EU VIES
database, which will truly verify that the number is valid and currently
allocated to a EU company. This is a little bit slower than the simple
off-line check, requires an Internet connection, and may not be available
all the time. If the service is not available or does not support the
requested country (e.g. for non-EU countries), a simple check will be performed
instead.
Supported countries currently include EU countries, and a few non-EU countries
such as Chile, Colombia, Mexico, Norway or Russia. For unsupported countries,
only the country code will be validated.
""",
'author': 'OpenERP SA',
'depends': ['account'],
'website': 'http://www.openerp.com',
'data': ['base_vat_view.xml'],
'installable': True,
'auto_install': False,
'images': ['images/1_partner_vat.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
freelawproject/recap-server | settings.py | 1 | 1377 | """Settings are derived by compiling any files ending in .py in the settings
directory, in alphabetical order.
This results in the following concept:
- default settings are in 10-public.py (this should contain most settings)
- custom settings are in 05-private.py (an example of this file is here for
you)
- any overrides to public settings can go in 20-private.py (you'll need to
create this)
"""
from __future__ import with_statement
import os
import glob
import sys
def _generate_secret_key(file_path):
import random
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
def random_char():
return chars[int(len(chars)*random.random())]
rand_str = ''.join(random_char() for i in range(64))
with open(file_path, 'w') as f:
f.write('SECRET_KEY=%s\n' % repr(rand_str))
ROOT_PATH = os.path.dirname(__file__)
# Try importing the SECRET_KEY from the file secret_key.py. If it doesn't exist,
# there is an import error, and the key is generated and written to the file.
try:
from secret_key import SECRET_KEY
except ImportError:
_generate_secret_key(os.path.join(ROOT_PATH, 'secret_key.py'))
from secret_key import SECRET_KEY
# Load the conf files.
conf_files = glob.glob(os.path.join(
os.path.dirname(__file__), 'settings', '*.py'))
conf_files.sort()
for f in conf_files:
execfile(os.path.abspath(f))
| gpl-3.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/ast.py | 91 | 12034 | """
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
"""
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, NameConstant):
return node.value
elif isinstance(node, UnaryOp) and \
isinstance(node.op, (UAdd, USub)) and \
isinstance(node.operand, (Num, UnaryOp, BinOp)):
operand = _convert(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, (Num, UnaryOp, BinOp)) and \
isinstance(node.left, (Num, UnaryOp, BinOp)):
left = _convert(node.left)
right = _convert(node.right)
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed node or string: ' + repr(node))
return _convert(node_or_string)
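# Illustrative usage (added for clarity; mirrors the documented behaviour):
#   >>> literal_eval("{'a': 1, 'b': [2, 3.0, (4,)], 'c': None}")
#   {'a': 1, 'b': [2, 3.0, (4,)], 'c': None}
#   >>> literal_eval("__import__('os')")   # only literals are allowed
#   Traceback (most recent call last):
#     ...
#   ValueError: malformed node or string: <_ast.Call object at 0x...>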
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
    By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
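# A short sketch of the RewriteName transformer from the docstring above in
# action; the expression string and the 'data' mapping are arbitrary sample
# values chosen for illustration:
#
#   tree = parse("foo + 1", mode='eval')
#   tree = RewriteName().visit(tree)     # Name 'foo' becomes data['foo']
#   fix_missing_locations(tree)          # generated nodes need lineno/col_offset
#   code = compile(tree, '<ast>', 'eval')
#   eval(code, {'data': {'foo': 41}})    # -> 42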
| lgpl-3.0 |
ramitalat/odoo | addons/hr_contract/__openerp__.py | 260 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Contracts',
'version': '1.0',
'category': 'Human Resources',
'description': """
Add all information on the employee form to manage contracts.
=============================================================
* Contract
* Place of Birth,
* Medical Examination Date
* Company Vehicle
You can assign several contracts per employee.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_action_rule', 'hr'],
'data': [
'security/ir.model.access.csv',
'hr_contract_view.xml',
'hr_contract_data.xml',
'base_action_rule_view.xml',
],
'demo': [],
'test': ['test/test_hr_contract.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gfreed/android_external_chromium-org | tools/metrics/histograms/pretty_print.py | 53 | 12009 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the histograms.xml file, alphabetizing tags, wrapping text
at 80 chars, enforcing standard attribute ordering, and standardizing
indentation.
This is quite a bit more complicated than just calling tree.toprettyxml();
we need additional customization, like special attribute ordering in tags
and wrapping text nodes, so we implement our own full custom XML pretty-printer.
"""
from __future__ import with_statement
import diffutil
import json
import logging
import shutil
import sys
import textwrap
import xml.dom.minidom
WRAP_COLUMN = 80
# Desired order for tag attributes; attributes listed here will appear first,
# and in the same order as in these lists.
# { tag_name: [attribute_name, ...] }
ATTRIBUTE_ORDER = {
'enum': ['name', 'type'],
'histogram': ['name', 'enum', 'units'],
'int': ['value', 'label'],
'fieldtrial': ['name', 'separator', 'ordering'],
'group': ['name', 'label'],
'affected-histogram': ['name'],
'with-group': ['name'],
}
# Tag names for top-level nodes whose children we don't want to indent.
TAGS_THAT_DONT_INDENT = [
'histogram-configuration',
'histograms',
'fieldtrials',
'enums'
]
# Extra vertical spacing rules for special tag names.
# {tag_name: (newlines_after_open, newlines_before_close, newlines_after_close)}
TAGS_THAT_HAVE_EXTRA_NEWLINE = {
'histogram-configuration': (2, 1, 1),
'histograms': (2, 1, 1),
'fieldtrials': (2, 1, 1),
'enums': (2, 1, 1),
'histogram': (1, 1, 1),
'enum': (1, 1, 1),
'fieldtrial': (1, 1, 1),
}
# Tags that we allow to be squished into a single line for brevity.
TAGS_THAT_ALLOW_SINGLE_LINE = [
'summary',
'int',
]
# Tags whose children we want to alphabetize. The key is the parent tag name,
# and the value is a pair of the tag name of the children we want to sort,
# and a key function that maps each child node to the desired sort key.
ALPHABETIZATION_RULES = {
'histograms': ('histogram', lambda n: n.attributes['name'].value.lower()),
'enums': ('enum', lambda n: n.attributes['name'].value.lower()),
'enum': ('int', lambda n: int(n.attributes['value'].value)),
'fieldtrials': ('fieldtrial', lambda n: n.attributes['name'].value.lower()),
'fieldtrial': ('affected-histogram',
lambda n: n.attributes['name'].value.lower()),
}
class Error(Exception):
pass
def LastLineLength(s):
"""Returns the length of the last line in s.
Args:
s: A multi-line string, including newlines.
Returns:
The length of the last line in s, in characters.
"""
if s.rfind('\n') == -1: return len(s)
return len(s) - s.rfind('\n') - len('\n')
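# A couple of illustrative values (sample strings only):
#
#   LastLineLength('<enum name="Foo"')           -> 16
#   LastLineLength('<histogram\n    name="A"')   -> 12  (only the last line counts)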
def XmlEscape(s):
"""XML-escapes the given string, replacing magic characters (&<>") with their
escaped equivalents."""
s = s.replace("&", "&").replace("<", "<")
s = s.replace("\"", """).replace(">", ">")
return s
def PrettyPrintNode(node, indent=0):
"""Pretty-prints the given XML node at the given indent level.
Args:
node: The minidom node to pretty-print.
indent: The current indent level.
Returns:
The pretty-printed string (including embedded newlines).
Raises:
Error if the XML has unknown tags or attributes.
"""
# Handle the top-level document node.
if node.nodeType == xml.dom.minidom.Node.DOCUMENT_NODE:
return '\n'.join([PrettyPrintNode(n) for n in node.childNodes])
# Handle text nodes.
if node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
# Wrap each paragraph in the text to fit in the 80 column limit.
wrapper = textwrap.TextWrapper()
wrapper.initial_indent = ' ' * indent
wrapper.subsequent_indent = ' ' * indent
wrapper.break_on_hyphens = False
wrapper.break_long_words = False
wrapper.width = WRAP_COLUMN
text = XmlEscape(node.data)
# Remove any common indent.
text = textwrap.dedent(text.strip('\n'))
lines = text.split('\n')
# Split the text into paragraphs at blank line boundaries.
paragraphs = [[]]
for l in lines:
if len(l.strip()) == 0 and len(paragraphs[-1]) > 0:
paragraphs.append([])
else:
paragraphs[-1].append(l)
# Remove trailing empty paragraph if present.
if len(paragraphs) > 0 and len(paragraphs[-1]) == 0:
paragraphs = paragraphs[:-1]
# Wrap each paragraph and separate with two newlines.
return '\n\n'.join([wrapper.fill('\n'.join(p)) for p in paragraphs])
# Handle element nodes.
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
newlines_after_open, newlines_before_close, newlines_after_close = (
TAGS_THAT_HAVE_EXTRA_NEWLINE.get(node.tagName, (1, 1, 0)))
# Open the tag.
s = ' ' * indent + '<' + node.tagName
# Calculate how much space to allow for the '>' or '/>'.
closing_chars = 1
if not node.childNodes:
closing_chars = 2
# Pretty-print the attributes.
attributes = node.attributes.keys()
if attributes:
# Reorder the attributes.
if not node.tagName in ATTRIBUTE_ORDER:
        unrecognized_attributes = attributes
else:
unrecognized_attributes = (
[a for a in attributes if not a in ATTRIBUTE_ORDER[node.tagName]])
attributes = (
[a for a in ATTRIBUTE_ORDER[node.tagName] if a in attributes])
for a in unrecognized_attributes:
logging.error(
'Unrecognized attribute "%s" in tag "%s"' % (a, node.tagName))
if unrecognized_attributes:
raise Error()
for a in attributes:
value = XmlEscape(node.attributes[a].value)
# Replace sequences of whitespace with single spaces.
words = value.split()
a_str = ' %s="%s"' % (a, ' '.join(words))
# Start a new line if the attribute will make this line too long.
if LastLineLength(s) + len(a_str) + closing_chars > WRAP_COLUMN:
s += '\n' + ' ' * (indent + 3)
# Output everything up to the first quote.
s += ' %s="' % (a)
value_indent_level = LastLineLength(s)
# Output one word at a time, splitting to the next line where necessary.
column = value_indent_level
for i, word in enumerate(words):
# This is slightly too conservative since not every word will be
# followed by the closing characters...
if i > 0 and (column + len(word) + 1 + closing_chars > WRAP_COLUMN):
s = s.rstrip() # remove any trailing whitespace
s += '\n' + ' ' * value_indent_level
column = value_indent_level
s += word + ' '
column += len(word) + 1
s = s.rstrip() # remove any trailing whitespace
s += '"'
s = s.rstrip() # remove any trailing whitespace
# Pretty-print the child nodes.
if node.childNodes:
s += '>'
# Calculate the new indent level for child nodes.
new_indent = indent
if node.tagName not in TAGS_THAT_DONT_INDENT:
new_indent += 2
child_nodes = node.childNodes
# Recursively pretty-print the child nodes.
child_nodes = [PrettyPrintNode(n, indent=new_indent) for n in child_nodes]
child_nodes = [c for c in child_nodes if len(c.strip()) > 0]
# Determine whether we can fit the entire node on a single line.
close_tag = '</%s>' % node.tagName
space_left = WRAP_COLUMN - LastLineLength(s) - len(close_tag)
if (node.tagName in TAGS_THAT_ALLOW_SINGLE_LINE and
len(child_nodes) == 1 and len(child_nodes[0].strip()) <= space_left):
s += child_nodes[0].strip()
else:
s += '\n' * newlines_after_open + '\n'.join(child_nodes)
s += '\n' * newlines_before_close + ' ' * indent
s += close_tag
else:
s += '/>'
s += '\n' * newlines_after_close
return s
# Handle comment nodes.
if node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
return '<!--%s-->\n' % node.data
# Ignore other node types. This could be a processing instruction (<? ... ?>)
# or cdata section (<![CDATA[...]]!>), neither of which are legal in the
# histograms XML at present.
logging.error('Ignoring unrecognized node data: %s' % node.toxml())
raise Error()
def unsafeAppendChild(parent, child):
"""Append child to parent's list of children, ignoring the possibility that it
is already in another node's childNodes list. Requires that the previous
parent of child is discarded (to avoid non-tree DOM graphs).
This can provide a significant speedup as O(n^2) operations are removed (in
particular, each child insertion avoids the need to traverse the old parent's
entire list of children)."""
child.parentNode = None
parent.appendChild(child)
child.parentNode = parent
def TransformByAlphabetizing(node):
"""Transform the given XML by alphabetizing specific node types according to
the rules in ALPHABETIZATION_RULES.
Args:
node: The minidom node to transform.
Returns:
The minidom node, with children appropriately alphabetized. Note that the
transformation is done in-place, i.e. the original minidom tree is modified
directly.
"""
if node.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
for c in node.childNodes: TransformByAlphabetizing(c)
return node
# Element node with a tag name that we alphabetize the children of?
if node.tagName in ALPHABETIZATION_RULES:
# Put subnodes in a list of node,key pairs to allow for custom sorting.
subtag, key_function = ALPHABETIZATION_RULES[node.tagName]
subnodes = []
last_key = -1
for c in node.childNodes:
if (c.nodeType == xml.dom.minidom.Node.ELEMENT_NODE and
c.tagName == subtag):
last_key = key_function(c)
# Subnodes that we don't want to rearrange use the last node's key,
# so they stay in the same relative position.
subnodes.append( (c, last_key) )
# Sort the subnode list.
subnodes.sort(key=lambda pair: pair[1])
# Re-add the subnodes, transforming each recursively.
while node.firstChild:
node.removeChild(node.firstChild)
for (c, _) in subnodes:
unsafeAppendChild(node, TransformByAlphabetizing(c))
return node
# Recursively handle other element nodes and other node types.
for c in node.childNodes: TransformByAlphabetizing(c)
return node
def PrettyPrint(raw_xml):
"""Pretty-print the given XML.
Args:
    raw_xml: The contents of the histograms XML file, as a string.
Returns:
The pretty-printed version.
"""
tree = xml.dom.minidom.parseString(raw_xml)
tree = TransformByAlphabetizing(tree)
return PrettyPrintNode(tree)
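# As a rough illustration (the markup below is a made-up sample, not taken from
# histograms.xml): calling
#
#   PrettyPrint('<histogram-configuration><enums>'
#               '<enum name="B" type="int"/><enum name="A" type="int"/>'
#               '</enums></histogram-configuration>')
#
# returns the same document re-indented, with the <enum> children alphabetized
# by name and any over-long attribute lists wrapped at WRAP_COLUMN.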
def main():
logging.basicConfig(level=logging.INFO)
presubmit = ('--presubmit' in sys.argv)
logging.info('Loading histograms.xml...')
with open('histograms.xml', 'rb') as f:
xml = f.read()
# Check there are no CR ('\r') characters in the file.
if '\r' in xml:
logging.info('DOS-style line endings (CR characters) detected - these are '
'not allowed. Please run dos2unix histograms.xml')
sys.exit(1)
logging.info('Pretty-printing...')
try:
pretty = PrettyPrint(xml)
except Error:
logging.error('Aborting parsing due to fatal errors.')
sys.exit(1)
if xml == pretty:
logging.info('histograms.xml is correctly pretty-printed.')
sys.exit(0)
if presubmit:
logging.info('histograms.xml is not formatted correctly; run '
'pretty_print.py to fix.')
sys.exit(1)
if not diffutil.PromptUserToAcceptDiff(
xml, pretty,
'Is the prettified version acceptable?'):
logging.error('Aborting')
return
logging.info('Creating backup file histograms.before.pretty-print.xml')
shutil.move('histograms.xml', 'histograms.before.pretty-print.xml')
logging.info('Writing new histograms.xml file')
with open('histograms.xml', 'wb') as f:
f.write(pretty)
if __name__ == '__main__':
main()
| bsd-3-clause |
chrismeyersfsu/ansible | lib/ansible/plugins/filter/ipaddr.py | 19 | 19242 | # (c) 2014, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 1:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
        # IPv4 link-local addresses live in 169.254.0.0/16 (RFC 3927)
        if ipaddr(str(v_ip), '169.254.0.0/16'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
if v.size > 1:
return str(v.network)
def _prefix_query(v):
return int(v.prefixlen)
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_unicast() and not v_ip.is_private() and \
not v_ip.is_loopback() and not v_ip.is_netmask() and \
not v_ip.is_hostmask():
return value
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _gateway_query,
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'gateway': _gateway_query,
'gw': _gateway_query,
'host': _host_query,
'host/prefix': _gateway_query,
'hostmask': _hostmask_query,
'hostnet': _gateway_query,
'int': _int_query,
'ip': _ip_query,
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'netmask': _netmask_query,
'network': _network_query,
'prefix': _prefix_query,
'private': _private_query,
'public': _public_query,
'revdns': _revdns_query,
'router': _gateway_query,
'size': _size_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value == True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
### ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
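# A few illustrative results for the ipaddr filter above; the addresses are
# arbitrary sample values:
#
#   '192.168.0.1/24' | ipaddr             ->  '192.168.0.1/24'
#   '192.168.0.1/24' | ipaddr('address')  ->  '192.168.0.1'
#   '192.168.0.1/24' | ipaddr('network')  ->  '192.168.0.0'
#   '192.168.0.1/24' | ipaddr('prefix')   ->  24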
def ipwrap(value, query = ''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version = False, alias = 'ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version = False, alias = 'ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query = ''):
return ipaddr(value, query, version = 4, alias = 'ipv4')
def ipv6(value, query = ''):
return ipaddr(value, query, version = 6, alias = 'ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
def ipsubnet(value, query = '', index = 'x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
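# A few illustrative results (the addresses are arbitrary examples):
#
#   '192.168.0.0/24' | ipsubnet           ->  '192.168.0.0/24'
#   '192.168.0.0/24' | ipsubnet(26)       ->  '4'   (number of /26 subnets inside)
#   '192.168.0.0/24' | ipsubnet(26, 1)    ->  '192.168.0.64/26'
#   '192.168.0.1'    | ipsubnet(20)       ->  '192.168.0.0/20'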
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
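# For example, with an arbitrary sample network:
#
#   '192.168.0.0/24' | nthhost(5)   ->  192.168.0.5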
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query = ''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias = 'slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
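# For example, with a made-up prefix and MAC address (modified EUI-64 flips the
# universal/local bit and inserts ff:fe in the middle of the MAC):
#
#   'fdcb::/64' | slaac('00:11:22:33:44:55')   ->  fdcb::211:22ff:fe33:4455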
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query = '', alias = 'hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def macaddr(value, query = ''):
return hwaddr(value, query, alias = 'macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
' installed on the ansible controller'.format(f_name))
def ip4_hex(arg):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{:02x}{:02x}{:02x}{:02x}'.format(*numbers)
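# For example, with an arbitrary sample address:
#
#   '192.168.0.1' | ip4_hex   ->  'c0a80001'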
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'nthhost': nthhost,
'slaac': slaac,
'ip4_hex': ip4_hex,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 |
mxamin/youtube-dl | youtube_dl/extractor/criterion.py | 1 | 1284 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?criterion\.com/films/(?P<id>[0-9]+)-.+'
_TEST = {
'url': 'http://www.criterion.com/films/184-le-samourai',
'md5': 'bc51beba55685509883a9a7830919ec3',
'info_dict': {
'id': '184',
'ext': 'mp4',
'title': 'Le Samouraï',
'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
return {
'id': video_id,
'url': final_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| unlicense |
kernel-sanders/arsenic-mobile | Dependencies/Twisted-13.0.0/twisted/python/test/test_inotify.py | 50 | 3584 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python._inotify}.
"""
from twisted.trial.unittest import TestCase
from twisted.python.runtime import platform
if platform.supportsINotify():
from ctypes import c_int, c_uint32, c_char_p
from twisted.python import _inotify
from twisted.python._inotify import (
INotifyError, initializeModule, init, add)
else:
_inotify = None
class INotifyTests(TestCase):
"""
Tests for L{twisted.python._inotify}.
"""
if _inotify is None:
skip = "This platform doesn't support INotify."
def test_missingInit(self):
"""
If the I{libc} object passed to L{initializeModule} has no
C{inotify_init} attribute, L{ImportError} is raised.
"""
class libc:
def inotify_add_watch(self):
pass
def inotify_rm_watch(self):
pass
self.assertRaises(ImportError, initializeModule, libc())
def test_missingAdd(self):
"""
If the I{libc} object passed to L{initializeModule} has no
C{inotify_add_watch} attribute, L{ImportError} is raised.
"""
class libc:
def inotify_init(self):
pass
def inotify_rm_watch(self):
pass
self.assertRaises(ImportError, initializeModule, libc())
def test_missingRemove(self):
"""
If the I{libc} object passed to L{initializeModule} has no
C{inotify_rm_watch} attribute, L{ImportError} is raised.
"""
class libc:
def inotify_init(self):
pass
def inotify_add_watch(self):
pass
self.assertRaises(ImportError, initializeModule, libc())
def test_setTypes(self):
"""
If the I{libc} object passed to L{initializeModule} has all of the
necessary attributes, it sets the C{argtypes} and C{restype} attributes
of the three ctypes methods used from libc.
"""
class libc:
def inotify_init(self):
pass
inotify_init = staticmethod(inotify_init)
def inotify_rm_watch(self):
pass
inotify_rm_watch = staticmethod(inotify_rm_watch)
def inotify_add_watch(self):
pass
inotify_add_watch = staticmethod(inotify_add_watch)
c = libc()
initializeModule(c)
self.assertEqual(c.inotify_init.argtypes, [])
self.assertEqual(c.inotify_init.restype, c_int)
self.assertEqual(c.inotify_rm_watch.argtypes, [c_int, c_int])
self.assertEqual(c.inotify_rm_watch.restype, c_int)
self.assertEqual(
c.inotify_add_watch.argtypes, [c_int, c_char_p, c_uint32])
self.assertEqual(c.inotify_add_watch.restype, c_int)
def test_failedInit(self):
"""
If C{inotify_init} returns a negative number, L{init} raises
L{INotifyError}.
"""
class libc:
def inotify_init(self):
return -1
self.patch(_inotify, 'libc', libc())
self.assertRaises(INotifyError, init)
def test_failedAddWatch(self):
"""
If C{inotify_add_watch} returns a negative number, L{add}
raises L{INotifyError}.
"""
class libc:
def inotify_add_watch(self, fd, path, mask):
return -1
self.patch(_inotify, 'libc', libc())
self.assertRaises(INotifyError, add, 3, '/foo', 0)
| gpl-3.0 |
qedsoftware/commcare-hq | custom/opm/constants.py | 1 | 1732 | from corehq.apps.fixtures.models import FixtureDataItem
from corehq.util.quickcache import quickcache
DOMAIN = 'opm'
PREG_REG_XMLNS = "http://openrosa.org/formdesigner/D127C457-3E15-4F5E-88C3-98CD1722C625"
VHND_XMLNS = "http://openrosa.org/formdesigner/ff5de10d75afda15cddb3b00a0b1e21d33a50d59"
BIRTH_PREP_XMLNS = "http://openrosa.org/formdesigner/50378991-FEC3-408D-B4A5-A264F3B52184"
DELIVERY_XMLNS = "http://openrosa.org/formdesigner/492F8F0E-EE7D-4B28-B890-7CDA5F137194"
CHILD_FOLLOWUP_XMLNS = "http://openrosa.org/formdesigner/C90C2C1F-3B34-47F3-B3A3-061EAAC1A601"
CFU1_XMLNS = "http://openrosa.org/formdesigner/d642dd328514f2af92c093d414d63e5b2670b9c"
CFU2_XMLNS = "http://openrosa.org/formdesigner/9ef423bba8595a99976f0bc9532617841253a7fa"
CFU3_XMLNS = "http://openrosa.org/formdesigner/f15b9f8fb92e2552b1885897ece257609ed16649"
GROWTH_MONITORING_XMLNS= "http://openrosa.org/formdesigner/F1356F3F-C695-491F-9277-7F9B5522200C"
CLOSE_FORM = "http://openrosa.org/formdesigner/41A1B3E0-C1A4-41EA-AE90-71A328F0D8FD"
CHILDREN_FORMS = [CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS, CHILD_FOLLOWUP_XMLNS]
OPM_XMLNSs = [PREG_REG_XMLNS, VHND_XMLNS, BIRTH_PREP_XMLNS, DELIVERY_XMLNS,
CHILD_FOLLOWUP_XMLNS, CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS,
GROWTH_MONITORING_XMLNS, CLOSE_FORM]
# TODO Move these to a cached fixtures lookup
MONTH_AMT = 250
TWO_YEAR_AMT = 2000
THREE_YEAR_AMT = 3000
@quickcache([], timeout=30 * 60)
def get_fixture_data():
fixtures = FixtureDataItem.get_indexed_items(DOMAIN, 'condition_amounts', 'condition')
return dict((k, int(fixture['rs_amount'])) for k, fixture in fixtures.items())
class InvalidRow(Exception):
"""
Raise this in the row constructor to skip row
"""
| bsd-3-clause |
roadmapper/ansible | test/units/modules/network/opx/opx_module.py | 52 | 2604 | # (c) 2018 Red Hat Inc.
#
# (c) 2018 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestOpxModule(ModuleTestCase):
def execute_module(self, failed=False, changed=False,
response=None, msg=None, db=None,
commit_event=None):
self.load_fixtures(response)
if failed:
result = self.failed(msg)
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed, db)
self.assertEqual(result['changed'], changed, result)
return result
def failed(self, msg):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
self.assertEqual(result['msg'], msg, result)
return result
def changed(self, changed=False, db=None):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
print("res" + str(result) + "dv=" + str(db) + "ch=" + str(changed))
self.assertEqual(result['changed'], changed, result)
if db:
self.assertEqual(result['db'], db, result)
return result
def load_fixtures(self, response=None):
pass
| gpl-3.0 |
brijeshkesariya/odoo | addons/account_payment/wizard/account_payment_order.py | 338 | 5906 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.tools.translate import _
class payment_order_create(osv.osv_memory):
"""
    Create a payment object with lines corresponding to the account move lines
    to pay, according to the due date and the payment mode provided by the user.
    Hypothesis:
    - Small number of non-reconciled move lines, payment modes and bank account types,
    - Big number of partners and bank accounts.
    If a type is given, unsuitable account entry lines are ignored.
"""
_name = 'payment.order.create'
_description = 'payment.order.create'
_columns = {
'duedate': fields.date('Due Date', required=True),
'entries': fields.many2many('account.move.line', 'line_pay_rel', 'pay_id', 'line_id', 'Entries')
}
_defaults = {
'duedate': lambda *a: time.strftime('%Y-%m-%d'),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if not context: context = {}
res = super(payment_order_create, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context and 'line_ids' in context:
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='entries']")
for node in nodes:
node.set('domain', '[("id", "in", '+ str(context['line_ids'])+')]')
res['arch'] = etree.tostring(doc)
return res
def create_payment(self, cr, uid, ids, context=None):
order_obj = self.pool.get('payment.order')
line_obj = self.pool.get('account.move.line')
payment_obj = self.pool.get('payment.line')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
line_ids = [entry.id for entry in data.entries]
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
payment = order_obj.browse(cr, uid, context['active_id'], context=context)
t = None
line2bank = line_obj.line2bank(cr, uid, line_ids, t, context)
## Finally populate the current payment with new lines:
for line in line_obj.browse(cr, uid, line_ids, context=context):
if payment.date_prefered == "now":
#no payment date => immediate payment
date_to_pay = False
elif payment.date_prefered == 'due':
date_to_pay = line.date_maturity
elif payment.date_prefered == 'fixed':
date_to_pay = payment.date_scheduled
payment_obj.create(cr, uid,{
'move_line_id': line.id,
'amount_currency': line.amount_residual_currency,
'bank_id': line2bank.get(line.id),
'order_id': payment.id,
'partner_id': line.partner_id and line.partner_id.id or False,
'communication': line.ref or '/',
'state': line.invoice and line.invoice.reference_type != 'none' and 'structured' or 'normal',
'date': date_to_pay,
'currency': (line.invoice and line.invoice.currency_id.id) or line.journal_id.currency.id or line.journal_id.company_id.currency_id.id,
}, context=context)
return {'type': 'ir.actions.act_window_close'}
def search_entries(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
search_due_date = data.duedate
# payment = self.pool.get('payment.order').browse(cr, uid, context['active_id'], context=context)
# Search for move line to pay:
domain = [('reconcile_id', '=', False), ('account_id.type', '=', 'payable'), ('credit', '>', 0), ('account_id.reconcile', '=', True)]
domain = domain + ['|', ('date_maturity', '<=', search_due_date), ('date_maturity', '=', False)]
line_ids = line_obj.search(cr, uid, domain, context=context)
context = dict(context, line_ids=line_ids)
model_data_ids = mod_obj.search(cr, uid,[('model', '=', 'ir.ui.view'), ('name', '=', 'view_create_payment_order_lines')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {'name': _('Entry Lines'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'payment.order.create',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SANBI-SA/tools-iuc | data_managers/data_manager_humann2_database_downloader/data_manager/data_manager_humann2_download.py | 9 | 5204 | #!/usr/bin/env python
#
# Data manager for reference data for the 'humann2' Galaxy tools
import datetime
import json
import optparse
import os
import shutil
import subprocess
import sys
HUMANN2_REFERENCE_DATA = {
"full": "Full",
"DEMO": "Demo",
"uniref50_diamond": "Full UniRef50",
"uniref50_ec_filtered_diamond": "EC-filtered UniRef50",
"uniref50_GO_filtered_rapsearch2": "GO filtered UniRef50 for rapsearch2",
"uniref90_diamond": "Full UniRef90",
"uniref90_ec_filtered_diamond": "EC-filtered UniRef90",
"DEMO_diamond": "Demo"
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
params = json.loads(open(jsonfile).read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
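# A trimmed sketch of the JSON this function expects; the values below are
# illustrative placeholders, not real paths or parameters:
#
#   {
#     "param_dict":  {"database": "chocophlan", "build": "full"},
#     "output_data": [{"extra_files_path": "/galaxy/.../dataset_42_files"}]
#   }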
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def download_humann2_db(data_tables, table_name, database, build, target_dir):
"""Download HUMAnN2 database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
table_name: name of the table
database: database to download (chocophlan or uniref)
build: build of the database to download
target_dir: directory to put copy or link to the data file
"""
value = "%s-%s-%s" % (database, build, datetime.date.today().isoformat())
db_target_dir = os.path.join(target_dir, database)
build_target_dir = os.path.join(db_target_dir, build)
cmd = "humann2_databases --download %s %s %s --update-config no" % (
database,
build,
db_target_dir)
subprocess.check_call(cmd, shell=True)
shutil.move(os.path.join(db_target_dir, database), build_target_dir)
add_data_table_entry(
data_tables,
table_name,
dict(
dbkey=build,
value=value,
name=HUMANN2_REFERENCE_DATA[build],
path=build_target_dir))
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = optparse.OptionParser(description='Download HUMAnN2 database')
parser.add_option('--database', help="Database name")
parser.add_option('--build', help="Build of the database")
options, args = parser.parse_args()
print("args : %s" % args)
# Check for JSON file
if len(args) != 1:
sys.stderr.write("Need to supply JSON file name")
sys.exit(1)
jsonfile = args[0]
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
if options.database == "chocophlan":
table_name = 'humann2_nucleotide_database'
else:
table_name = 'humann2_protein_database'
add_data_table(data_tables, table_name)
# Fetch data from specified data sources
download_humann2_db(
data_tables,
table_name,
options.database,
options.build,
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
open(jsonfile, 'wb').write(json.dumps(data_tables))
print("Done.")
| mit |
tonioo/modoboa | modoboa/lib/u2u_decode.py | 1 | 2282 | # -*- coding: utf-8 -*-
"""
Unstructured rfc2047 header to unicode.
A stupid (and not accurate) answer to https://bugs.python.org/issue1079.
"""
from __future__ import unicode_literals
import re
from email.header import decode_header, make_header
from email.utils import parseaddr
from django.utils.encoding import smart_text
# check spaces between encoded_words (and strip them)
sre = re.compile(r"\?=[ \t]+=\?")
# re pat for MIME encoded_word (without trailing spaces)
mre = re.compile(r"=\?[^?]*?\?[bq]\?[^?\t]*?\?=", re.I)
# re do detect encoded ASCII characters
ascii_re = re.compile(r"=[\dA-F]{2,3}", re.I)
def clean_spaces(m):
"""Replace unencoded spaces in string.
    :param m: a regular expression match object
:return: the cleaned string
"""
return m.group(0).replace(" ", "=20")
def clean_non_printable_char(m):
"""Strip non printable characters."""
code = int(m.group(0)[1:], 16)
if code < 20:
return ""
return m.group(0)
def decode_mime(m):
"""Substitute matching encoded_word with unicode equiv."""
h = decode_header(clean_spaces(m))
try:
u = smart_text(make_header(h))
except (LookupError, UnicodeDecodeError):
return m.group(0)
return u
def clean_header(header):
"""Clean header function."""
header = "".join(header.splitlines())
header = sre.sub("?==?", header)
return ascii_re.sub(clean_non_printable_char, header)
def u2u_decode(s):
"""utility function for (final) decoding of mime header
note: resulting string is in one line (no \n within)
note2: spaces between enc_words are stripped (see RFC2047)
"""
return mre.sub(decode_mime, clean_header(s)).strip(" \r\t\n")
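# Example with a sample RFC 2047 encoded header value:
#
#   u2u_decode("=?utf-8?q?Caf=C3=A9?= menu")   ->  'Café menu'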
def decode_address(value):
"""Special function for address decoding.
We need a dedicated processing because RFC1342 explicitely says
address MUST NOT contain encoded-word:
These are the ONLY locations where an encoded-word may appear. In
particular, an encoded-word MUST NOT appear in any portion of an
"address". In addition, an encoded-word MUST NOT be used in a
Received header field.
"""
phrase, address = parseaddr(clean_header(value))
if phrase:
phrase = mre.sub(decode_mime, phrase)
return phrase, address
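# Example with a sample address header value:
#
#   decode_address("=?utf-8?q?Andr=C3=A9?= <andre@example.com>")
#       ->  ('André', 'andre@example.com')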
| isc |
klothe/tablib | tablib/packages/yaml/dumper.py | 542 | 2719 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
| mit |
overdrive3000/skytools | python/skytools/scripting.py | 3 | 36632 |
"""Useful functions and classes for database scripts.
"""
import errno
import logging
import logging.config
import logging.handlers
import optparse
import os
import select
import signal
import sys
import time
import skytools
import skytools.skylog
try:
import skytools.installer_config
default_skylog = skytools.installer_config.skylog
except ImportError:
default_skylog = 0
__pychecker__ = 'no-badexcept'
__all__ = ['BaseScript', 'UsageError', 'daemonize', 'DBScript']
class UsageError(Exception):
"""User induced error."""
#
# daemon mode
#
def daemonize():
"""Turn the process into daemon.
Goes background and disables all i/o.
"""
# launch new process, kill parent
pid = os.fork()
if pid != 0:
os._exit(0)
# start new session
os.setsid()
# stop i/o
fd = os.open("/dev/null", os.O_RDWR)
os.dup2(fd, 0)
os.dup2(fd, 1)
os.dup2(fd, 2)
if fd > 2:
os.close(fd)
#
# Pidfile locking+cleanup & daemonization combined
#
def run_single_process(runnable, daemon, pidfile):
"""Run runnable class, possibly daemonized, locked on pidfile."""
# check if another process is running
if pidfile and os.path.isfile(pidfile):
if skytools.signal_pidfile(pidfile, 0):
print("Pidfile exists, another process running?")
sys.exit(1)
else:
print("Ignoring stale pidfile")
# daemonize if needed
if daemon:
daemonize()
# clean only own pidfile
own_pidfile = False
try:
if pidfile:
data = str(os.getpid())
skytools.write_atomic(pidfile, data)
own_pidfile = True
runnable.run()
finally:
if own_pidfile:
try:
os.remove(pidfile)
except: pass
#
# logging setup
#
_log_config_done = 0
_log_init_done = {}
def _load_log_config(fn, defs):
"""Fixed fileConfig."""
# Work around fileConfig default behaviour to disable
# not only old handlers on load (which slightly makes sense)
# but also old logger objects (which does not make sense).
if sys.hexversion >= 0x2060000:
logging.config.fileConfig(fn, defs, False)
else:
logging.config.fileConfig(fn, defs)
root = logging.getLogger()
for lg in root.manager.loggerDict.values():
lg.disabled = 0
def _init_log(job_name, service_name, cf, log_level, is_daemon):
"""Logging setup happens here."""
global _log_init_done, _log_config_done
got_skylog = 0
use_skylog = cf.getint("use_skylog", default_skylog)
# if non-daemon, avoid skylog if script is running on console.
# set use_skylog=2 to disable.
if not is_daemon and use_skylog == 1:
if os.isatty(sys.stdout.fileno()):
use_skylog = 0
# load logging config if needed
if use_skylog and not _log_config_done:
# python logging.config braindamage:
# cannot specify external classes without such a hack
logging.skylog = skytools.skylog
skytools.skylog.set_service_name(service_name, job_name)
# load general config
flist = cf.getlist('skylog_locations',
['skylog.ini', '~/.skylog.ini', '/etc/skylog.ini'])
for fn in flist:
fn = os.path.expanduser(fn)
if os.path.isfile(fn):
defs = {'job_name': job_name, 'service_name': service_name}
_load_log_config(fn, defs)
got_skylog = 1
break
_log_config_done = 1
if not got_skylog:
sys.stderr.write("skylog.ini not found!\n")
sys.exit(1)
# avoid duplicate logging init for job_name
log = logging.getLogger(job_name)
if job_name in _log_init_done:
return log
_log_init_done[job_name] = 1
# tune level on root logger
root = logging.getLogger()
root.setLevel(log_level)
# compatibility: specify ini file in script config
def_fmt = '%(asctime)s %(process)s %(levelname)s %(message)s'
def_datefmt = '' # None
logfile = cf.getfile("logfile", "")
if logfile:
fstr = cf.get('logfmt_file', def_fmt)
fstr_date = cf.get('logdatefmt_file', def_datefmt)
if log_level < logging.INFO:
fstr = cf.get('logfmt_file_verbose', fstr)
fstr_date = cf.get('logdatefmt_file_verbose', fstr_date)
fmt = logging.Formatter(fstr, fstr_date)
size = cf.getint('log_size', 10*1024*1024)
num = cf.getint('log_count', 3)
hdlr = logging.handlers.RotatingFileHandler(
logfile, 'a', size, num)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
# if skylog.ini is disabled or not available, log at least to stderr
if not got_skylog:
fstr = cf.get('logfmt_console', def_fmt)
fstr_date = cf.get('logdatefmt_console', def_datefmt)
if log_level < logging.INFO:
fstr = cf.get('logfmt_console_verbose', fstr)
fstr_date = cf.get('logdatefmt_console_verbose', fstr_date)
hdlr = logging.StreamHandler()
fmt = logging.Formatter(fstr, fstr_date)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
return log
class BaseScript(object):
"""Base class for service scripts.
Handles logging, daemonizing, config, errors.
Config template::
## Parameters for skytools.BaseScript ##
# how many seconds to sleep between work loops
# if missing or 0, then instead sleeping, the script will exit
loop_delay = 1.0
# where to log
logfile = ~/log/%(job_name)s.log
# where to write pidfile
pidfile = ~/pid/%(job_name)s.pid
# per-process name to use in logging
#job_name = %(config_name)s
# whether centralized logging should be used
# search-path [ ./skylog.ini, ~/.skylog.ini, /etc/skylog.ini ]
# 0 - disabled
# 1 - enabled, unless non-daemon on console (os.isatty())
# 2 - always enabled
#use_skylog = 0
# where to find skylog.ini
#skylog_locations = skylog.ini, ~/.skylog.ini, /etc/skylog.ini
# how many seconds to sleep after catching an exception
#exception_sleep = 20
"""
service_name = None
job_name = None
cf = None
cf_defaults = {}
pidfile = None
# >0 - sleep time if work() requests sleep
# 0 - exit if work requests sleep
# <0 - run work() once [same as looping=0]
loop_delay = 1.0
# 0 - run work() once
# 1 - run work() repeatedly
looping = 1
# result from last work() call:
# 1 - there is probably more work, don't sleep
# 0 - no work, sleep before calling again
# -1 - exception was thrown
work_state = 1
# setup logger here, this allows override by subclass
log = logging.getLogger('skytools.BaseScript')
def __init__(self, service_name, args):
"""Script setup.
User class should override work() and optionally __init__(), startup(),
reload(), reset(), shutdown() and init_optparse().
NB: In case of daemon, __init__() and startup()/work()/shutdown() will be
run in different processes. So nothing fancy should be done in __init__().
@param service_name: unique name for script.
It will be also default job_name, if not specified in config.
@param args: cmdline args (sys.argv[1:]), but can be overridden
"""
self.service_name = service_name
self.go_daemon = 0
self.need_reload = 0
self.exception_count = 0
self.stat_dict = {}
self.log_level = logging.INFO
# parse command line
parser = self.init_optparse()
self.options, self.args = parser.parse_args(args)
# check args
if self.options.version:
self.print_version()
sys.exit(0)
if self.options.daemon:
self.go_daemon = 1
if self.options.quiet:
self.log_level = logging.WARNING
if self.options.verbose > 1:
self.log_level = skytools.skylog.TRACE
elif self.options.verbose:
self.log_level = logging.DEBUG
self.cf_override = {}
if self.options.set:
for a in self.options.set:
k, v = a.split('=', 1)
self.cf_override[k.strip()] = v.strip()
if self.options.ini:
self.print_ini()
sys.exit(0)
# read config file
self.reload()
# init logging
_init_log(self.job_name, self.service_name, self.cf, self.log_level, self.go_daemon)
# send signal, if needed
if self.options.cmd == "kill":
self.send_signal(signal.SIGTERM)
elif self.options.cmd == "stop":
self.send_signal(signal.SIGINT)
elif self.options.cmd == "reload":
self.send_signal(signal.SIGHUP)
def print_version(self):
service = self.service_name
if getattr(self, '__version__', None):
service += ' version %s' % self.__version__
print '%s, Skytools version %s' % (service, skytools.__version__)
def print_ini(self):
"""Prints out ini file from doc string of the script of default for dbscript
Used by --ini option on command line.
"""
# current service name
print("[%s]\n" % self.service_name)
# walk class hierarchy
bases = [self.__class__]
while len(bases) > 0:
parents = []
for c in bases:
for p in c.__bases__:
if p not in parents:
parents.append(p)
doc = c.__doc__
if doc:
self._print_ini_frag(doc)
bases = parents
def _print_ini_frag(self, doc):
# use last '::' block as config template
pos = doc and doc.rfind('::\n') or -1
if pos < 0:
return
doc = doc[pos+2 : ].rstrip()
doc = skytools.dedent(doc)
# merge overridden options into output
for ln in doc.splitlines():
vals = ln.split('=', 1)
if len(vals) != 2:
print(ln)
continue
k = vals[0].strip()
v = vals[1].strip()
if k and k[0] == '#':
print(ln)
k = k[1:]
if k in self.cf_override:
print('%s = %s' % (k, self.cf_override[k]))
elif k in self.cf_override:
if v:
print('#' + ln)
print('%s = %s' % (k, self.cf_override[k]))
else:
print(ln)
print('')
def load_config(self):
"""Loads and returns skytools.Config instance.
By default it uses first command-line argument as config
file name. Can be overridden.
"""
if len(self.args) < 1:
print("need config file, use --help for help.")
sys.exit(1)
conf_file = self.args[0]
return skytools.Config(self.service_name, conf_file,
user_defs = self.cf_defaults,
override = self.cf_override)
def init_optparse(self, parser = None):
"""Initialize a OptionParser() instance that will be used to
parse command line arguments.
Note that it can be overridden in both directions - either DBScript
will initialize an instance and pass it to user code or user can
initialize and then pass to DBScript.init_optparse().
@param parser: optional OptionParser() instance,
where DBScript should attach its own arguments.
@return: initialized OptionParser() instance.
"""
if parser:
p = parser
else:
p = optparse.OptionParser()
p.set_usage("%prog [options] INI")
# generic options
p.add_option("-q", "--quiet", action="store_true",
help = "log only errors and warnings")
p.add_option("-v", "--verbose", action="count",
help = "log verbosely")
p.add_option("-d", "--daemon", action="store_true",
help = "go background")
p.add_option("-V", "--version", action="store_true",
help = "print version info and exit")
p.add_option("", "--ini", action="store_true",
help = "display sample ini file")
p.add_option("", "--set", action="append",
help = "override config setting (--set 'PARAM=VAL')")
# control options
g = optparse.OptionGroup(p, 'control running process')
g.add_option("-r", "--reload",
action="store_const", const="reload", dest="cmd",
help = "reload config (send SIGHUP)")
g.add_option("-s", "--stop",
action="store_const", const="stop", dest="cmd",
help = "stop program safely (send SIGINT)")
g.add_option("-k", "--kill",
action="store_const", const="kill", dest="cmd",
help = "kill program immediately (send SIGTERM)")
p.add_option_group(g)
return p
def send_signal(self, sig):
if not self.pidfile:
self.log.warning("No pidfile in config, nothing to do")
elif os.path.isfile(self.pidfile):
alive = skytools.signal_pidfile(self.pidfile, sig)
if not alive:
self.log.warning("pidfile exists, but process not running")
else:
self.log.warning("No pidfile, process not running")
sys.exit(0)
def set_single_loop(self, do_single_loop):
"""Changes whether the script will loop or not."""
if do_single_loop:
self.looping = 0
else:
self.looping = 1
def _boot_daemon(self):
run_single_process(self, self.go_daemon, self.pidfile)
def start(self):
"""This will launch main processing thread."""
if self.go_daemon:
if not self.pidfile:
self.log.error("Daemon needs pidfile")
sys.exit(1)
self.run_func_safely(self._boot_daemon)
def stop(self):
"""Safely stops processing loop."""
self.looping = 0
def reload(self):
"Reload config."
# avoid double loading on startup
if not self.cf:
self.cf = self.load_config()
else:
self.cf.reload()
self.log.info ("Config reloaded")
self.job_name = self.cf.get("job_name")
self.pidfile = self.cf.getfile("pidfile", '')
self.loop_delay = self.cf.getfloat("loop_delay", self.loop_delay)
self.exception_sleep = self.cf.getfloat("exception_sleep", 20)
self.exception_quiet = self.cf.getlist("exception_quiet", [])
self.exception_grace = self.cf.getfloat("exception_grace", 5*60)
self.exception_reset = self.cf.getfloat("exception_reset", 15*60)
def hook_sighup(self, sig, frame):
"Internal SIGHUP handler. Minimal code here."
self.need_reload = 1
last_sigint = 0
def hook_sigint(self, sig, frame):
"Internal SIGINT handler. Minimal code here."
self.stop()
t = time.time()
if t - self.last_sigint < 1:
self.log.warning("Double ^C, fast exit")
sys.exit(1)
self.last_sigint = t
def stat_get(self, key):
"""Reads a stat value."""
try:
value = self.stat_dict[key]
except KeyError:
value = None
return value
def stat_put(self, key, value):
"""Sets a stat value."""
self.stat_dict[key] = value
def stat_increase(self, key, increase = 1):
"""Increases a stat value."""
try:
self.stat_dict[key] += increase
except KeyError:
self.stat_dict[key] = increase
def send_stats(self):
"Send statistics to log."
res = []
for k, v in self.stat_dict.items():
res.append("%s: %s" % (k, v))
if len(res) == 0:
return
logmsg = "{%s}" % ", ".join(res)
self.log.info(logmsg)
self.stat_dict = {}
def reset(self):
"Something bad happened, reset all state."
pass
def run(self):
"Thread main loop."
# run startup, safely
self.run_func_safely(self.startup)
while 1:
# reload config, if needed
if self.need_reload:
self.reload()
self.need_reload = 0
# do some work
work = self.run_once()
if not self.looping or self.loop_delay < 0:
break
# remember work state
self.work_state = work
# should sleep?
if not work:
if self.loop_delay > 0:
self.sleep(self.loop_delay)
if not self.looping:
break
else:
break
# run shutdown, safely?
self.shutdown()
def run_once(self):
state = self.run_func_safely(self.work, True)
# send stats that was added
self.send_stats()
return state
last_func_fail = None
def run_func_safely(self, func, prefer_looping = False):
"Run users work function, safely."
try:
r = func()
if self.last_func_fail and time.time() > self.last_func_fail + self.exception_reset:
self.last_func_fail = None
# set exception count to 0 after success
self.exception_count = 0
return r
except UsageError, d:
self.log.error(str(d))
sys.exit(1)
except MemoryError, d:
try: # complex logging may not succeed
self.log.exception("Job %s out of memory, exiting" % self.job_name)
except MemoryError:
self.log.fatal("Out of memory")
sys.exit(1)
except SystemExit, d:
self.send_stats()
if prefer_looping and self.looping and self.loop_delay > 0:
self.log.info("got SystemExit(%s), exiting" % str(d))
self.reset()
raise d
except KeyboardInterrupt, d:
self.send_stats()
if prefer_looping and self.looping and self.loop_delay > 0:
self.log.info("got KeyboardInterrupt, exiting")
self.reset()
sys.exit(1)
except Exception, d:
try: # this may fail too
self.send_stats()
except:
pass
if self.last_func_fail is None:
self.last_func_fail = time.time()
emsg = str(d).rstrip()
self.reset()
self.exception_hook(d, emsg)
# reset and sleep
self.reset()
if prefer_looping and self.looping and self.loop_delay > 0:
# increase exception count & sleep
self.exception_count += 1
self.sleep_on_exception()
return -1
sys.exit(1)
def sleep(self, secs):
"""Make script sleep for some amount of time."""
try:
time.sleep(secs)
except IOError, ex:
if ex.errno != errno.EINTR:
raise
def sleep_on_exception(self):
"""Make script sleep for some amount of time when an exception occurs.
To implement more advanced exception sleeping, such as exponential backoff,
you can override this method. Also note that you can use self.exception_count
to track the number of consecutive exceptions.
"""
self.sleep(self.exception_sleep)
def _is_quiet_exception(self, ex):
return ((self.exception_quiet == ["ALL"] or ex.__class__.__name__ in self.exception_quiet)
and self.last_func_fail and time.time() < self.last_func_fail + self.exception_grace)
def exception_hook(self, det, emsg):
"""Called on after exception processing.
Can do additional logging.
@param det: exception details
@param emsg: exception msg
"""
lm = "Job %s crashed: %s" % (self.job_name, emsg)
if self._is_quiet_exception(det):
self.log.warning(lm)
else:
self.log.exception(lm)
def work(self):
"""Here should user's processing happen.
Return value is taken as boolean - if true, the next loop
starts immediately. If false, DBScript sleeps for a loop_delay.
"""
raise Exception("Nothing implemented?")
def startup(self):
"""Will be called just before entering main loop.
In case of daemon, it will be called in the same process as work(),
unlike __init__().
"""
self.started = time.time()
# set signals
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, self.hook_sighup)
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, self.hook_sigint)
def shutdown(self):
"""Will be called just after exiting main loop.
In case of daemon, it will be called in the same process as work(),
unlike __init__().
"""
pass
# define some aliases (short-cuts / backward compatibility cruft)
stat_add = stat_put # Old, deprecated function.
stat_inc = stat_increase
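# --- Illustrative sketch (not part of the original module) ---
# A minimal user script built on BaseScript might look like this, assuming
# an ini file that provides job_name / logfile / pidfile:
#
#   import sys, skytools
#
#   class MyJob(skytools.BaseScript):
#       def work(self):
#           self.log.info("one unit of work done")
#           return 0      # 0 -> sleep loop_delay before the next call
#
#   if __name__ == '__main__':
#       MyJob('my_job', sys.argv[1:]).start()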
##
## DBScript
##
#: how old a connection may get before it is closed
DEF_CONN_AGE = 20*60 # 20 min
class DBScript(BaseScript):
"""Base class for database scripts.
Handles database connection state.
Config template::
## Parameters for skytools.DBScript ##
# default lifetime for database connections (in seconds)
#connection_lifetime = 1200
"""
def __init__(self, service_name, args):
"""Script setup.
User class should override work() and optionally __init__(), startup(),
reload(), reset() and init_optparse().
NB: in case of daemon, the __init__() and startup()/work() will be
run in different processes. So nothing fancy should be done in __init__().
@param service_name: unique name for script.
It will be also default job_name, if not specified in config.
@param args: cmdline args (sys.argv[1:]), but can be overridden
"""
self.db_cache = {}
self._db_defaults = {}
self._listen_map = {} # dbname: channel_list
BaseScript.__init__(self, service_name, args)
def connection_hook(self, dbname, conn):
pass
def set_database_defaults(self, dbname, **kwargs):
self._db_defaults[dbname] = kwargs
def add_connect_string_profile(self, connstr, profile):
"""Add extra profile info to connect string.
"""
if profile:
extra = self.cf.get("%s_extra_connstr" % profile, '')
if extra:
connstr += ' ' + extra
return connstr
def get_database(self, dbname, autocommit = 0, isolation_level = -1,
cache = None, connstr = None, profile = None):
"""Load cached database connection.
User must not store it permanently somewhere,
as all connections will be invalidated on reset.
"""
max_age = self.cf.getint('connection_lifetime', DEF_CONN_AGE)
if not cache:
cache = dbname
params = {}
defs = self._db_defaults.get(cache, {})
params.update(defs)
if isolation_level >= 0:
params['isolation_level'] = isolation_level
elif autocommit:
params['isolation_level'] = 0
elif params.get('autocommit', 0):
params['isolation_level'] = 0
elif not 'isolation_level' in params:
params['isolation_level'] = skytools.I_READ_COMMITTED
if not 'max_age' in params:
params['max_age'] = max_age
if cache in self.db_cache:
dbc = self.db_cache[cache]
if connstr is None:
connstr = self.cf.get(dbname, '')
if connstr:
connstr = self.add_connect_string_profile(connstr, profile)
dbc.check_connstr(connstr)
else:
if not connstr:
connstr = self.cf.get(dbname)
connstr = self.add_connect_string_profile(connstr, profile)
# connstr might contain a password, so it is not a good idea to log it
filtered_connstr = connstr
pos = connstr.lower().find('password')
if pos >= 0:
filtered_connstr = connstr[:pos] + ' [...]'
self.log.debug("Connect '%s' to '%s'" % (cache, filtered_connstr))
dbc = DBCachedConn(cache, connstr, params['max_age'], setup_func = self.connection_hook)
self.db_cache[cache] = dbc
clist = []
if cache in self._listen_map:
clist = self._listen_map[cache]
return dbc.get_connection(params['isolation_level'], clist)
def close_database(self, dbname):
"""Explicitly close a cached connection.
Next call to get_database() will reconnect.
"""
if dbname in self.db_cache:
dbc = self.db_cache[dbname]
dbc.reset()
del self.db_cache[dbname]
def reset(self):
"Something bad happened, reset all connections."
for dbc in self.db_cache.values():
dbc.reset()
self.db_cache = {}
BaseScript.reset(self)
def run_once(self):
state = BaseScript.run_once(self)
# reconnect if needed
for dbc in self.db_cache.values():
dbc.refresh()
return state
def exception_hook(self, d, emsg):
"""Log database and query details from exception."""
curs = getattr(d, 'cursor', None)
conn = getattr(curs, 'connection', None)
cname = getattr(conn, 'my_name', None)
if cname:
# Properly named connection
cname = d.cursor.connection.my_name
sql = getattr(curs, 'query', None) or '?'
if len(sql) > 200: # avoid logging huge batched londiste queries
sql = sql[:60] + " ..."
lm = "Job %s got error on connection '%s': %s. Query: %s" % (
self.job_name, cname, emsg, sql)
if self._is_quiet_exception(d):
self.log.warning(lm)
else:
self.log.exception(lm)
else:
BaseScript.exception_hook(self, d, emsg)
def sleep(self, secs):
"""Make script sleep for some amount of time."""
fdlist = []
for dbname in self._listen_map.keys():
if dbname not in self.db_cache:
continue
fd = self.db_cache[dbname].fileno()
if fd is None:
continue
fdlist.append(fd)
if not fdlist:
return BaseScript.sleep(self, secs)
try:
if hasattr(select, 'poll'):
p = select.poll()
for fd in fdlist:
p.register(fd, select.POLLIN)
p.poll(int(secs * 1000))
else:
select.select(fdlist, [], [], secs)
except select.error, d:
self.log.info('wait canceled')
def _exec_cmd(self, curs, sql, args, quiet = False, prefix = None):
"""Internal tool: Run SQL on cursor."""
if self.options.verbose:
self.log.debug("exec_cmd: %s" % skytools.quote_statement(sql, args))
_pfx = ""
if prefix:
_pfx = "[%s] " % prefix
curs.execute(sql, args)
ok = True
rows = curs.fetchall()
for row in rows:
try:
code = row['ret_code']
msg = row['ret_note']
except KeyError:
self.log.error("Query does not conform to exec_cmd API:")
self.log.error("SQL: %s" % skytools.quote_statement(sql, args))
self.log.error("Row: %s" % repr(row.copy()))
sys.exit(1)
level = code / 100
if level == 1:
self.log.debug("%s%d %s" % (_pfx, code, msg))
elif level == 2:
if quiet:
self.log.debug("%s%d %s" % (_pfx, code, msg))
else:
self.log.info("%s%s" % (_pfx, msg,))
elif level == 3:
self.log.warning("%s%s" % (_pfx, msg,))
else:
self.log.error("%s%s" % (_pfx, msg,))
self.log.debug("Query was: %s" % skytools.quote_statement(sql, args))
ok = False
return (ok, rows)
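# Illustrative note: a database function driven through exec_cmd() is expected
# to return rows with `ret_code` and `ret_note` columns, e.g. (200, 'table
# registered').  Codes 1xx are logged as debug, 2xx as info, 3xx as warning;
# anything else is treated as an error and aborts the script.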
def _exec_cmd_many(self, curs, sql, baseargs, extra_list, quiet = False, prefix=None):
"""Internal tool: Run SQL on cursor multiple times."""
ok = True
rows = []
for a in extra_list:
(tmp_ok, tmp_rows) = self._exec_cmd(curs, sql, baseargs + [a], quiet, prefix)
if not tmp_ok:
ok = False
rows += tmp_rows
return (ok, rows)
def exec_cmd(self, db_or_curs, q, args, commit = True, quiet = False, prefix = None):
"""Run SQL on db with code/value error handling."""
if hasattr(db_or_curs, 'cursor'):
db = db_or_curs
curs = db.cursor()
else:
db = None
curs = db_or_curs
(ok, rows) = self._exec_cmd(curs, q, args, quiet, prefix)
if ok:
if commit and db:
db.commit()
return rows
else:
if db:
db.rollback()
if self.options.verbose:
raise Exception("db error")
# error is already logged
sys.exit(1)
def exec_cmd_many(self, db_or_curs, sql, baseargs, extra_list,
commit = True, quiet = False, prefix = None):
"""Run SQL on db multiple times."""
if hasattr(db_or_curs, 'cursor'):
db = db_or_curs
curs = db.cursor()
else:
db = None
curs = db_or_curs
(ok, rows) = self._exec_cmd_many(curs, sql, baseargs, extra_list, quiet, prefix)
if ok:
if commit and db:
db.commit()
return rows
else:
if db:
db.rollback()
if self.options.verbose:
raise Exception("db error")
# error is already logged
sys.exit(1)
def execute_with_retry (self, dbname, stmt, args, exceptions = None):
""" Execute SQL and retry if it fails.
Return number of retries and current valid cursor, or raise an exception.
"""
sql_retry = self.cf.getbool("sql_retry", False)
sql_retry_max_count = self.cf.getint("sql_retry_max_count", 10)
sql_retry_max_time = self.cf.getint("sql_retry_max_time", 300)
sql_retry_formula_a = self.cf.getint("sql_retry_formula_a", 1)
sql_retry_formula_b = self.cf.getint("sql_retry_formula_b", 5)
sql_retry_formula_cap = self.cf.getint("sql_retry_formula_cap", 60)
elist = exceptions or tuple()
stime = time.time()
tried = 0
dbc = None
while True:
try:
if dbc is None:
if dbname not in self.db_cache:
self.get_database(dbname, autocommit=1)
dbc = self.db_cache[dbname]
if dbc.isolation_level != skytools.I_AUTOCOMMIT:
raise skytools.UsageError ("execute_with_retry: autocommit required")
else:
dbc.reset()
curs = dbc.get_connection(dbc.isolation_level).cursor()
curs.execute (stmt, args)
break
except elist, e:
if not sql_retry or tried >= sql_retry_max_count or time.time() - stime >= sql_retry_max_time:
raise
self.log.info("Job %s got error on connection %s: %s" % (self.job_name, dbname, e))
except:
raise
# y = a + bx , apply cap
y = sql_retry_formula_a + sql_retry_formula_b * tried
if sql_retry_formula_cap is not None and y > sql_retry_formula_cap:
y = sql_retry_formula_cap
tried += 1
self.log.info("Retry #%i in %i seconds ...", tried, y)
self.sleep(y)
return tried, curs
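# Worked example of the retry formula above (illustrative only): with the
# defaults a=1, b=5, cap=60 the sleeps between attempts grow as
# 1, 6, 11, 16, ... seconds and are capped at 60 seconds per attempt.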
def listen(self, dbname, channel):
"""Make connection listen for specific event channel.
Listening will be activated on next .get_database() call.
Basically this means that DBScript.sleep() will poll for events
on that db connection, so when an event appears, the script will be
woken up.
"""
if dbname not in self._listen_map:
self._listen_map[dbname] = []
clist = self._listen_map[dbname]
if channel not in clist:
clist.append(channel)
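# Sketch (names are illustrative): a subclass that wants to be woken up by
# NOTIFY events could do
#   def startup(self):
#       skytools.DBScript.startup(self)
#       self.listen('eventdb', 'new_work')   # 'eventdb' is a config db name
# DBScript.sleep() will then poll that connection and return as soon as a
# NOTIFY on channel new_work arrives.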
def unlisten(self, dbname, channel='*'):
"""Stop connection for listening on specific event channel.
Listening will stop on next .get_database() call.
"""
if dbname not in self._listen_map:
return
if channel == '*':
del self._listen_map[dbname]
return
clist = self._listen_map[dbname]
try:
clist.remove(channel)
except ValueError:
pass
class DBCachedConn(object):
"""Cache a db connection."""
def __init__(self, name, loc, max_age = DEF_CONN_AGE, verbose = False, setup_func=None, channels=[]):
self.name = name
self.loc = loc
self.conn = None
self.conn_time = 0
self.max_age = max_age
self.isolation_level = -1
self.verbose = verbose
self.setup_func = setup_func
self.listen_channel_list = []
def fileno(self):
if not self.conn:
return None
return self.conn.cursor().fileno()
def get_connection(self, isolation_level = -1, listen_channel_list = []):
# default isolation_level is READ COMMITTED
if isolation_level < 0:
isolation_level = skytools.I_READ_COMMITTED
# new conn?
if not self.conn:
self.isolation_level = isolation_level
self.conn = skytools.connect_database(self.loc)
self.conn.my_name = self.name
self.conn.set_isolation_level(isolation_level)
self.conn_time = time.time()
if self.setup_func:
self.setup_func(self.name, self.conn)
else:
if self.isolation_level != isolation_level:
raise Exception("Conflict in isolation_level")
self._sync_listen(listen_channel_list)
# done
return self.conn
def _sync_listen(self, new_clist):
if not new_clist and not self.listen_channel_list:
return
curs = self.conn.cursor()
for ch in self.listen_channel_list:
if ch not in new_clist:
curs.execute("UNLISTEN %s" % skytools.quote_ident(ch))
for ch in new_clist:
if ch not in self.listen_channel_list:
curs.execute("LISTEN %s" % skytools.quote_ident(ch))
if self.isolation_level != skytools.I_AUTOCOMMIT:
self.conn.commit()
self.listen_channel_list = new_clist[:]
def refresh(self):
if not self.conn:
return
#for row in self.conn.notifies():
# if row[0].lower() == "reload":
# self.reset()
# return
if not self.max_age:
return
if time.time() - self.conn_time >= self.max_age:
self.reset()
def reset(self):
if not self.conn:
return
# drop reference
conn = self.conn
self.conn = None
self.listen_channel_list = []
# close
try:
conn.close()
except: pass
def check_connstr(self, connstr):
"""Drop connection if connect string has changed.
"""
if self.loc != connstr:
self.reset()
| isc |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.5/django/contrib/localflavor/it/it_province.py | 110 | 2779 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', 'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
| lgpl-3.0 |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/gis/management/commands/ogrinspect.py | 20 | 5848 | import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
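# Illustrative behaviour of the two actions above: "--layer=2" stores the
# integer 2 while "--layer=roads" stores the string 'roads'; "--null=true"
# stores True while "--null=name,code" stores ['name', 'code'].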
class Command(BaseCommand):
help = (
'Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode'
)
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument(
'--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.',
)
parser.add_argument(
'--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field (defaults to `geom`)'
)
parser.add_argument(
'--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.',
)
parser.add_argument(
'--multi-geom', action='store_true', dest='multi_geom',
help='Treat the geometry in the data source as a geometry collection.',
)
parser.add_argument(
'--name-field', dest='name_field',
help='Specifies a field name to return for the __str__() method.',
)
parser.add_argument(
'--no-imports', action='store_false', dest='imports',
help='Do not include `from django.contrib.gis.db import models` statement.',
)
parser.add_argument(
'--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.',
)
parser.add_argument(
'--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.',
)
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in get_func_args(_ogrinspect) and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {
'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s': '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s': '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
| mit |
almeidapaulopt/erpnext | erpnext/accounts/report/share_balance/share_balance.py | 19 | 1475 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
if not filters.get("date"):
frappe.throw(_("Please select date"))
columns = get_columns(filters)
date = filters.get("date")
data = []
if not filters.get("shareholder"):
pass
else:
share_type, no_of_shares, rate, amount = 1, 2, 3, 4
all_shares = get_all_shares(filters.get("shareholder"))
for share_entry in all_shares:
row = False
for datum in data:
if datum[share_type] == share_entry.share_type:
datum[no_of_shares] += share_entry.no_of_shares
datum[amount] += share_entry.amount
if datum[no_of_shares] == 0:
datum[rate] = 0
else:
datum[rate] = datum[amount] / datum[no_of_shares]
row = True
break
# new entry
if not row:
row = [filters.get("shareholder"),
share_entry.share_type, share_entry.no_of_shares, share_entry.rate, share_entry.amount]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
_("Shareholder") + ":Link/Shareholder:150",
_("Share Type") + "::90",
_("No of Shares") + "::90",
_("Average Rate") + ":Currency:90",
_("Amount") + ":Currency:90"
]
return columns
def get_all_shares(shareholder):
return frappe.get_doc('Shareholder', shareholder).share_balance
| gpl-3.0 |
zhuwenping/python-for-android | python-modules/twisted/twisted/internet/test/test_qtreactor.py | 59 | 1108 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from twisted.trial import unittest
from twisted.python.runtime import platform
from twisted.python.util import sibpath
from twisted.internet.utils import getProcessOutputAndValue
skipWindowsNopywin32 = None
if platform.isWindows():
try:
import win32process
except ImportError:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
class QtreactorTestCase(unittest.TestCase):
"""
Tests for L{twisted.internet.qtreactor}.
"""
def test_importQtreactor(self):
"""
Attempting to import L{twisted.internet.qtreactor} should raise an
C{ImportError} indicating that C{qtreactor} is no longer a part of
Twisted.
"""
sys.modules["qtreactor"] = None
from twisted.plugins.twisted_qtstub import errorMessage
try:
import twisted.internet.qtreactor
except ImportError, e:
self.assertEquals(str(e), errorMessage)
| apache-2.0 |
justinpotts/mozillians | vendor-local/lib/python/kombu/transport/SQS.py | 10 | 11233 | """
kombu.transport.SQS
===================
Amazon SQS transport.
:copyright: (c) 2010 - 2012 by Ask Solem
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import string
from Queue import Empty
from anyjson import loads, dumps
from boto import exception
from boto import sdb as _sdb
from boto import sqs as _sqs
from boto.sdb.domain import Domain
from boto.sdb.connection import SDBConnection
from boto.sqs.connection import SQSConnection
from boto.sqs.message import Message
from kombu.exceptions import StdChannelError
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import safe_str
from . import virtual
# dots are replaced by dashes, all other punctuation
# is replaced by underscores.
CHARS_REPLACE_TABLE = dict((ord(c), 0x5f)
for c in string.punctuation if c not in '-_.')
CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-'
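# Illustrative effect of the table above (applied via translate() in
# Channel.entity_name below):
#   u"celery.priority" -> u"celery-priority"
#   u"shared:tasks" -> u"shared_tasks"
# letters, digits, '-' and '_' pass through unchanged.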
class Table(Domain):
"""Amazon SimpleDB domain describing the message routing table."""
# caches queues already bound, so we don't have to declare them again.
_already_bound = set()
def routes_for(self, exchange):
"""Iterator giving all routes for an exchange."""
return self.select("""WHERE exchange = '%s'""" % exchange)
def get_queue(self, queue):
"""Get binding for queue."""
qid = self._get_queue_id(queue)
if qid:
return self.get_item(qid)
def create_binding(self, queue):
"""Get binding item for queue.
Creates the item if it doesn't exist.
"""
item = self.get_queue(queue)
if item:
return item, item["id"]
id = uuid()
return self.new_item(id), id
def queue_bind(self, exchange, routing_key, pattern, queue):
if queue not in self._already_bound:
binding, id = self.create_binding(queue)
binding.update(exchange=exchange,
routing_key=routing_key or "",
pattern=pattern or "",
queue=queue or "",
id=id)
binding.save()
self._already_bound.add(queue)
def queue_delete(self, queue):
"""delete queue by name."""
self._already_bound.discard(queue)
item = self._get_queue_item(queue)
if item:
self.delete_item(item)
def exchange_delete(self, exchange):
"""Delete all routes for `exchange`."""
for item in self.routes_for(exchange):
self.delete_item(item["id"])
def get_item(self, item_name):
"""Uses `consistent_read` by default."""
# Domain is an old-style class, can't use super().
for consistent_read in (False, True):
item = Domain.get_item(self, item_name, consistent_read)
if item:
return item
def select(self, query='', next_token=None, consistent_read=True,
max_items=None):
"""Uses `consistent_read` by default."""
query = """SELECT * FROM `%s` %s""" % (self.name, query)
return Domain.select(self, query, next_token,
consistent_read, max_items)
def _try_first(self, query='', **kwargs):
for c in (False, True):
for item in self.select(query, consistent_read=c, **kwargs):
return item
def get_exchanges(self):
return list(set(i["exchange"] for i in self.select()))
def _get_queue_item(self, queue):
return self._try_first("""WHERE queue = '%s' limit 1""" % queue)
def _get_queue_id(self, queue):
item = self._get_queue_item(queue)
if item:
return item["id"]
class Channel(virtual.Channel):
Table = Table
default_region = "us-east-1"
domain_format = "kombu%(vhost)s"
_sdb = None
_sqs = None
_queue_cache = {}
_noack_queues = set()
def __init__(self, *args, **kwargs):
super(Channel, self).__init__(*args, **kwargs)
# SQS blows up when you try to create a new queue if one already
# exists with a different visibility_timeout, so this prepopulates
# the queue_cache to protect us from recreating
# queues that are known to already exist.
queues = self.sqs.get_all_queues()
for queue in queues:
self._queue_cache[queue.name] = queue
def basic_consume(self, queue, no_ack, *args, **kwargs):
if no_ack:
self._noack_queues.add(queue)
return super(Channel, self).basic_consume(queue, no_ack,
*args, **kwargs)
def basic_cancel(self, consumer_tag):
if consumer_tag in self._consumers:
queue = self._tag_to_queue[consumer_tag]
self._noack_queues.discard(queue)
return super(Channel, self).basic_cancel(consumer_tag)
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a legal SQS queue name."""
return unicode(safe_str(name)).translate(table)
def _new_queue(self, queue, **kwargs):
"""Ensures a queue exists in SQS."""
queue = self.queue_name_prefix + queue
try:
return self._queue_cache[queue]
except KeyError:
q = self._queue_cache[queue] = self.sqs.create_queue(
self.entity_name(queue),
self.visibility_timeout)
return q
def _queue_bind(self, *args):
"""Bind ``queue`` to ``exchange`` with routing key.
Route will be stored in SDB if so enabled.
"""
if self.supports_fanout:
self.table.queue_bind(*args)
def get_table(self, exchange):
"""Get routing table.
Retrieved from SDB if :attr:`supports_fanout`.
"""
if self.supports_fanout:
return [(r["routing_key"], r["pattern"], r["queue"])
for r in self.table.routes_for(exchange)]
return super(Channel, self).get_table(exchange)
def get_exchanges(self):
if self.supports_fanout:
return self.table.get_exchanges()
return super(Channel, self).get_exchanges()
def _delete(self, queue, *args):
"""delete queue by name."""
self._queue_cache.pop(queue, None)
if self.supports_fanout:
self.table.queue_delete(queue)
super(Channel, self)._delete(queue)
def exchange_delete(self, exchange, **kwargs):
"""Delete exchange by name."""
if self.supports_fanout:
self.table.exchange_delete(exchange)
super(Channel, self).exchange_delete(exchange, **kwargs)
def _has_queue(self, queue, **kwargs):
"""Returns True if ``queue`` has been previously declared."""
if self.supports_fanout:
return bool(self.table.get_queue(queue))
return super(Channel, self)._has_queue(queue)
def _put(self, queue, message, **kwargs):
"""Put message onto queue."""
q = self._new_queue(queue)
m = Message()
m.set_body(dumps(message))
q.write(m)
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message to all queues in ``exchange``."""
for route in self.table.routes_for(exchange):
self._put(route["queue"], message, **kwargs)
def _get(self, queue):
"""Try to retrieve a single message off ``queue``."""
q = self._new_queue(queue)
rs = q.get_messages(1)
if rs:
m = rs[0]
payload = loads(rs[0].get_body())
if queue in self._noack_queues:
q.delete_message(m)
else:
payload["properties"]["delivery_info"].update({
"sqs_message": m, "sqs_queue": q, })
return payload
raise Empty()
def basic_ack(self, delivery_tag):
delivery_info = self.qos.get(delivery_tag).delivery_info
try:
queue = delivery_info["sqs_queue"]
except KeyError:
pass
else:
queue.delete_message(delivery_info["sqs_message"])
super(Channel, self).basic_ack(delivery_tag)
def _size(self, queue):
"""Returns the number of messages in a queue."""
return self._new_queue(queue).count()
def _purge(self, queue):
"""Deletes all current messages in a queue."""
q = self._new_queue(queue)
# SQS is slow at registering messages, so run for a few
# iterations to ensure messages are deleted.
size = 0
for i in xrange(10):
size += q.count()
if not size:
break
q.clear()
return size
def close(self):
super(Channel, self).close()
for conn in (self._sqs, self._sdb):
if conn:
try:
conn.close()
except AttributeError, exc: # FIXME ???
if "can't set attribute" not in str(exc):
raise
def _get_regioninfo(self, regions):
if self.region:
for _r in regions:
if _r.name == self.region:
return _r
def _aws_connect_to(self, fun, regions):
conninfo = self.conninfo
region = self._get_regioninfo(regions)
return fun(region=region,
aws_access_key_id=conninfo.userid,
aws_secret_access_key=conninfo.password,
port=conninfo.port)
def _next_delivery_tag(self):
return uuid() # See #73
@property
def sqs(self):
if self._sqs is None:
self._sqs = self._aws_connect_to(SQSConnection, _sqs.regions())
return self._sqs
@property
def sdb(self):
if self._sdb is None:
self._sdb = self._aws_connect_to(SDBConnection, _sdb.regions())
return self._sdb
@property
def table(self):
name = self.entity_name(self.domain_format % {
"vhost": self.conninfo.virtual_host})
d = self.sdb.get_object("CreateDomain", {"DomainName": name},
self.Table)
d.name = name
return d
@property
def conninfo(self):
return self.connection.client
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def visibility_timeout(self):
return self.transport_options.get("visibility_timeout")
@cached_property
def queue_name_prefix(self):
return self.transport_options.get("queue_name_prefix", '')
@cached_property
def supports_fanout(self):
return self.transport_options.get("sdb_persistence", False)
@cached_property
def region(self):
return self.transport_options.get("region") or self.default_region
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = None
connection_errors = (exception.SQSError, socket.error)
channel_errors = (exception.SQSDecodeError, StdChannelError)
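# Illustrative configuration sketch (not part of the original file). The
# transport_options keys below are the ones read by Channel above; the
# connection call itself is only a guess at typical kombu usage:
#   from kombu import BrokerConnection
#   conn = BrokerConnection(transport="SQS",
#                           userid=AWS_ACCESS_KEY_ID,
#                           password=AWS_SECRET_ACCESS_KEY,
#                           transport_options={"region": "us-east-1",
#                                              "visibility_timeout": 30,
#                                              "queue_name_prefix": "dev-",
#                                              "sdb_persistence": False})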
| bsd-3-clause |
huangkuan/hack | lib/gcloud/monitoring/test_query.py | 7 | 23503 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
PROJECT = 'my-project'
METRIC_TYPE = 'compute.googleapis.com/instance/uptime'
METRIC_LABELS = {'instance_name': 'instance-1'}
METRIC_LABELS2 = {'instance_name': 'instance-2'}
RESOURCE_TYPE = 'gce_instance'
RESOURCE_LABELS = {
'project_id': 'my-project',
'zone': 'us-east1-a',
'instance_id': '1234567890123456789',
}
RESOURCE_LABELS2 = {
'project_id': 'my-project',
'zone': 'us-east1-b',
'instance_id': '9876543210987654321',
}
METRIC_KIND = 'DELTA'
VALUE_TYPE = 'DOUBLE'
TS0 = '2016-04-06T22:05:00.042Z'
TS1 = '2016-04-06T22:05:01.042Z'
TS2 = '2016-04-06T22:05:02.042Z'
class TestAligner(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Aligner
return Aligner
def test_one(self):
self.assertTrue(hasattr(self._getTargetClass(), 'ALIGN_RATE'))
def test_names(self):
for name in self._getTargetClass().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._getTargetClass(), name), name)
class TestReducer(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Reducer
return Reducer
def test_one(self):
self.assertTrue(hasattr(self._getTargetClass(),
'REDUCE_PERCENTILE_99'))
def test_names(self):
for name in self._getTargetClass().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._getTargetClass(), name), name)
class TestQuery(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Query
return Query
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_minimal(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client)
self.assertEqual(query._client, client)
self.assertEqual(query._filter.metric_type,
self._getTargetClass().DEFAULT_METRIC_TYPE)
self.assertIsNone(query._start_time)
self.assertIsNone(query._end_time)
self.assertIsNone(query._per_series_aligner)
self.assertIsNone(query._alignment_period_seconds)
self.assertIsNone(query._cross_series_reducer)
self.assertEqual(query._group_by_fields, ())
def test_constructor_maximal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 30)
DAYS, HOURS, MINUTES = 1, 2, 3
T0 = T1 - datetime.timedelta(days=DAYS, hours=HOURS, minutes=MINUTES)
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE,
end_time=T1,
days=DAYS, hours=HOURS, minutes=MINUTES)
self.assertEqual(query._client, client)
self.assertEqual(query._filter.metric_type, METRIC_TYPE)
self.assertEqual(query._start_time, T0)
self.assertEqual(query._end_time, T1)
self.assertIsNone(query._per_series_aligner)
self.assertIsNone(query._alignment_period_seconds)
self.assertIsNone(query._cross_series_reducer)
self.assertEqual(query._group_by_fields, ())
def test_constructor_default_end_time(self):
import datetime
from gcloud._testing import _Monkey
from gcloud.monitoring import query as MUT
MINUTES = 5
NOW, T0, T1 = [
datetime.datetime(2016, 4, 7, 2, 30, 30),
datetime.datetime(2016, 4, 7, 2, 25, 0),
datetime.datetime(2016, 4, 7, 2, 30, 0),
]
client = _Client(project=PROJECT, connection=_Connection())
with _Monkey(MUT, _UTCNOW=lambda: NOW):
query = self._makeOne(client, METRIC_TYPE, minutes=MINUTES)
self.assertEqual(query._start_time, T0)
self.assertEqual(query._end_time, T1)
def test_constructor_nonzero_duration_illegal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 30)
client = _Client(project=PROJECT, connection=_Connection())
with self.assertRaises(ValueError):
self._makeOne(client, METRIC_TYPE, end_time=T1)
def test_execution_without_interval_illegal(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
with self.assertRaises(ValueError):
list(query)
def test_metric_type(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
self.assertEqual(query.metric_type, METRIC_TYPE)
def test_filter(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
expected = 'metric.type = "{type}"'.format(type=METRIC_TYPE)
self.assertEqual(query.filter, expected)
def test_filter_by_group(self):
GROUP = '1234567'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_group(GROUP)
expected = (
'metric.type = "{type}"'
' AND group.id = "{group}"'
).format(type=METRIC_TYPE, group=GROUP)
self.assertEqual(query.filter, expected)
def test_filter_by_projects(self):
PROJECT1, PROJECT2 = 'project-1', 'project-2'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_projects(PROJECT1, PROJECT2)
expected = (
'metric.type = "{type}"'
' AND project = "{project1}" OR project = "{project2}"'
).format(type=METRIC_TYPE, project1=PROJECT1, project2=PROJECT2)
self.assertEqual(query.filter, expected)
def test_filter_by_resources(self):
ZONE_PREFIX = 'europe-'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_resources(zone_prefix=ZONE_PREFIX)
expected = (
'metric.type = "{type}"'
' AND resource.label.zone = starts_with("{prefix}")'
).format(type=METRIC_TYPE, prefix=ZONE_PREFIX)
self.assertEqual(query.filter, expected)
def test_filter_by_metrics(self):
INSTANCE = 'my-instance'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_metrics(instance_name=INSTANCE)
expected = (
'metric.type = "{type}"'
' AND metric.label.instance_name = "{instance}"'
).format(type=METRIC_TYPE, instance=INSTANCE)
self.assertEqual(query.filter, expected)
def test_request_parameters_minimal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 0)
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(end_time=T1)
actual = list(query._build_query_params())
expected = [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
]
self.assertEqual(actual, expected)
def test_request_parameters_maximal(self):
import datetime
T0 = datetime.datetime(2016, 4, 7, 2, 0, 0)
T1 = datetime.datetime(2016, 4, 7, 2, 30, 0)
ALIGNER = 'ALIGN_DELTA'
MINUTES, SECONDS, PERIOD = 1, 30, '90s'
REDUCER = 'REDUCE_MEAN'
FIELD1, FIELD2 = 'resource.zone', 'metric.instance_name'
PAGE_SIZE = 100
PAGE_TOKEN = 'second-page-please'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
query = query.align(ALIGNER, minutes=MINUTES, seconds=SECONDS)
query = query.reduce(REDUCER, FIELD1, FIELD2)
actual = list(query._build_query_params(headers_only=True,
page_size=PAGE_SIZE,
page_token=PAGE_TOKEN))
expected = [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
('aggregation.perSeriesAligner', ALIGNER),
('aggregation.alignmentPeriod', PERIOD),
('aggregation.crossSeriesReducer', REDUCER),
('aggregation.groupByFields', FIELD1),
('aggregation.groupByFields', FIELD2),
('view', 'HEADERS'),
('pageSize', PAGE_SIZE),
('pageToken', PAGE_TOKEN),
]
self.assertEqual(actual, expected)
def test_iteration(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
INTERVAL1 = {'startTime': TS0, 'endTime': TS1}
INTERVAL2 = {'startTime': TS1, 'endTime': TS2}
VALUE1 = 60 # seconds
VALUE2 = 60.001 # seconds
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE1}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE1}},
],
}
SERIES2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE2}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE2}},
],
}
RESPONSE = {'timeSeries': [SERIES1, SERIES2]}
connection = _Connection(RESPONSE)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual([p.value for p in series1.points], [VALUE1, VALUE1])
self.assertEqual([p.value for p in series2.points], [VALUE2, VALUE2])
self.assertEqual([p.end_time for p in series1.points], [TS1, TS2])
self.assertEqual([p.end_time for p in series2.points], [TS1, TS2])
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
def test_iteration_paged(self):
import copy
import datetime
from gcloud.exceptions import NotFound
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
INTERVAL1 = {'startTime': TS0, 'endTime': TS1}
INTERVAL2 = {'startTime': TS1, 'endTime': TS2}
VALUE1 = 60 # seconds
VALUE2 = 60.001 # seconds
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE1}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE1}},
],
}
SERIES2_PART1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE2}},
],
}
SERIES2_PART2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE2}},
],
}
TOKEN = 'second-page-please'
RESPONSE1 = {'timeSeries': [SERIES1, SERIES2_PART1],
'nextPageToken': TOKEN}
RESPONSE2 = {'timeSeries': [SERIES2_PART2]}
connection = _Connection(RESPONSE1, RESPONSE2)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual([p.value for p in series1.points], [VALUE1, VALUE1])
self.assertEqual([p.value for p in series2.points], [VALUE2, VALUE2])
self.assertEqual([p.end_time for p in series1.points], [TS1, TS2])
self.assertEqual([p.end_time for p in series2.points], [TS1, TS2])
expected_request1 = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
expected_request2 = copy.deepcopy(expected_request1)
expected_request2['query_params'].append(('pageToken', TOKEN))
request1, request2 = connection._requested
self.assertEqual(request1, expected_request1)
self.assertEqual(request2, expected_request2)
with self.assertRaises(NotFound):
list(query)
def test_iteration_empty(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
connection = _Connection({})
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 0)
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
def test_iteration_headers_only(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
SERIES2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
RESPONSE = {'timeSeries': [SERIES1, SERIES2]}
connection = _Connection(RESPONSE)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query.iter(headers_only=True))
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual(series1.points, [])
self.assertEqual(series2.points, [])
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
('view', 'HEADERS'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
class Test_Filter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import _Filter
return _Filter
def _makeOne(self, metric_type):
return self._getTargetClass()(metric_type)
def test_minimal(self):
obj = self._makeOne(METRIC_TYPE)
expected = 'metric.type = "{type}"'.format(type=METRIC_TYPE)
self.assertEqual(str(obj), expected)
def test_maximal(self):
obj = self._makeOne(METRIC_TYPE)
obj.group_id = '1234567'
obj.projects = 'project-1', 'project-2'
obj.select_resources(resource_type='some-resource',
resource_label='foo')
obj.select_metrics(metric_label_prefix='bar-')
expected = (
'metric.type = "{type}"'
' AND group.id = "1234567"'
' AND project = "project-1" OR project = "project-2"'
' AND resource.label.resource_label = "foo"'
' AND resource.type = "some-resource"'
' AND metric.label.metric_label = starts_with("bar-")'
).format(type=METRIC_TYPE)
self.assertEqual(str(obj), expected)
class Test__build_label_filter(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.monitoring.query import _build_label_filter
return _build_label_filter(*args, **kwargs)
def test_no_labels(self):
self.assertEqual(self._callFUT('resource'), '')
def test_label_is_none(self):
self.assertEqual(self._callFUT('resource', foo=None), '')
def test_metric_labels(self):
actual = self._callFUT(
'metric',
alpha_prefix='a-',
beta_gamma_suffix='-b',
delta_epsilon='xyz',
)
expected = (
'metric.label.alpha = starts_with("a-")'
' AND metric.label.beta_gamma = ends_with("-b")'
' AND metric.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_resource_labels(self):
actual = self._callFUT(
'resource',
alpha_prefix='a-',
beta_gamma_suffix='-b',
delta_epsilon='xyz',
)
expected = (
'resource.label.alpha = starts_with("a-")'
' AND resource.label.beta_gamma = ends_with("-b")'
' AND resource.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_raw_label_filters(self):
actual = self._callFUT(
'resource',
'resource.label.alpha = starts_with("a-")',
'resource.label.beta_gamma = ends_with("-b")',
'resource.label.delta_epsilon = "xyz"',
)
expected = (
'resource.label.alpha = starts_with("a-")'
' AND resource.label.beta_gamma = ends_with("-b")'
' AND resource.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_resource_type(self):
actual = self._callFUT('resource', resource_type='foo')
expected = 'resource.type = "foo"'
self.assertEqual(actual, expected)
def test_resource_type_prefix(self):
actual = self._callFUT('resource', resource_type_prefix='foo-')
expected = 'resource.type = starts_with("foo-")'
self.assertEqual(actual, expected)
def test_resource_type_suffix(self):
actual = self._callFUT('resource', resource_type_suffix='-foo')
expected = 'resource.type = ends_with("-foo")'
self.assertEqual(actual, expected)
class Test__format_timestamp(unittest2.TestCase):
def _callFUT(self, timestamp):
from gcloud.monitoring.query import _format_timestamp
return _format_timestamp(timestamp)
def test_naive(self):
from datetime import datetime
TIMESTAMP = datetime(2016, 4, 5, 13, 30, 0)
timestamp = self._callFUT(TIMESTAMP)
self.assertEqual(timestamp, '2016-04-05T13:30:00Z')
def test_with_timezone(self):
from datetime import datetime
from gcloud._helpers import UTC
TIMESTAMP = datetime(2016, 4, 5, 13, 30, 0, tzinfo=UTC)
timestamp = self._callFUT(TIMESTAMP)
self.assertEqual(timestamp, '2016-04-05T13:30:00Z')
class _Connection(object):
def __init__(self, *responses):
self._responses = list(responses)
self._requested = []
def api_request(self, **kwargs):
from gcloud.exceptions import NotFound
self._requested.append(kwargs)
try:
return self._responses.pop(0)
except IndexError:
raise NotFound('miss')
class _Client(object):
def __init__(self, project, connection):
self.project = project
self.connection = connection
| apache-2.0 |
bitsauce/Sauce-Engine | 3rdparty/SDL/src/joystick/sort_controllers.py | 84 | 1974 | #!/usr/bin/env python
#
# Script to sort the game controller database entries in SDL_gamecontroller.c
import re
filename = "SDL_gamecontrollerdb.h"
input = open(filename)
output = open(filename + ".new", "w")
parsing_controllers = False
controllers = []
controller_guids = {}
split_pattern = re.compile(r'([^"]*")([^,]*,)([^,]*,)([^"]*)(".*)')
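# Note (added for clarity; the example below is an assumption based on typical
# entries, not taken from this script): a mapping line looks roughly like
#   "030000004c050000c405000011010000,PS4 Controller,a:b1,b:b0,platform:Linux,",
# so split_pattern captures everything up to and including the opening quote,
# the GUID plus its comma, the controller name plus its comma, the
# comma-separated bindings, and the closing quote with any trailing text.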
def save_controller(line):
global controllers
match = split_pattern.match(line)
entry = [ match.group(1), match.group(2), match.group(3) ]
bindings = sorted(match.group(4).split(","))
if (bindings[0] == ""):
bindings.pop(0)
entry.extend(",".join(bindings) + ",")
entry.append(match.group(5))
controllers.append(entry)
def write_controllers():
global controllers
global controller_guids
for entry in sorted(controllers, key=lambda entry: entry[2]):
line = "".join(entry) + "\n"
if not line.endswith(",\n") and not line.endswith("*/\n"):
print "Warning: '%s' is missing a comma at the end of the line" % (line)
if (entry[1] in controller_guids):
print "Warning: entry '%s' is duplicate of entry '%s'" % (entry[2], controller_guids[entry[1]][2])
controller_guids[entry[1]] = entry
output.write(line)
controllers = []
controller_guids = {}
for line in input:
if ( parsing_controllers ):
if (line.startswith("{")):
output.write(line)
elif (line.startswith("#endif")):
parsing_controllers = False
write_controllers()
output.write(line)
elif (line.startswith("#")):
print "Parsing " + line.strip()
write_controllers()
output.write(line)
else:
save_controller(line)
else:
if (line.startswith("static const char *s_ControllerMappings")):
parsing_controllers = True
output.write(line)
output.close()
print "Finished writing %s.new" % filename
| lgpl-2.1 |
drax68/graphite-web | webapp/graphite/metrics/urls.py | 5 | 1063 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import url
from . import views
urlpatterns = [
url('^index\.json$', views.index_json, name='metrics_index'),
url('^find/?$', views.find_view, name='metrics_find'),
url('^expand/?$', views.expand_view, name='metrics_expand'),
url('^get-metadata/?$', views.get_metadata_view,
name='metrics_get_metadata'),
url('^set-metadata/?$', views.set_metadata_view,
name='metrics_set_metadata'),
url('', views.find_view, name='metrics'),
]
| apache-2.0 |
pku9104038/edx-platform | cms/envs/dev.py | 2 | 6563 | """
This config file runs the simplest dev environment."""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "localhost:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': '[email protected]:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': '[email protected]:MITx/6002x-fall-2012.git',
'origin': '[email protected]:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': '[email protected]:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# disable NPS survey in dev mode
FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = True
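# Hypothetical local usage (command form and key value are assumptions, not
# part of this settings file):
#   SEGMENT_IO_KEY=<studio-write-key> ./manage.py cms runserver --settings=dev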
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
| agpl-3.0 |
imsplitbit/nova | nova/tests/virt/test_block_device.py | 2 | 18934 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import block_device
from nova.conductor import api as conductor_api
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.db_api = self.mox.CreateMock(conductor_api.API)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_driver_block_device_base_class(self):
self.base_class_transform_called = False
class DummyBlockDevice(driver_block_device.DriverBlockDevice):
_fields = set(['foo', 'bar'])
_legacy_fields = set(['foo', 'baz'])
def _transform(inst, bdm):
self.base_class_transform_called = True
dummy_device = DummyBlockDevice({'foo': 'foo_val', 'id': 42})
self.assertTrue(self.base_class_transform_called)
self.assertThat(dummy_device, matchers.DictMatches(
{'foo': None, 'bar': None}))
self.assertEqual(dummy_device.id, 42)
self.assertThat(dummy_device.legacy(), matchers.DictMatches(
{'foo': None, 'baz': None}))
self.assertRaises(driver_block_device._NotTransformable,
DummyBlockDevice, {'no_device': True})
def _test_driver_device(self, name):
test_bdm = self.driver_classes[name](
getattr(self, "%s_bdm" % name))
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume = {'id': 'fake-volume-id-1'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-1'}
self.volume_api.get(self.context,
'fake-volume-id-1').AndReturn(volume)
self.volume_api.check_attach(self.context, volume,
instance=instance).AndReturn(None)
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, volume['id'],
connector).AndReturn(connection_info)
self.volume_api.attach(elevated_context, 'fake-volume-id-1',
'fake_uuid', '/dev/sda1').AndReturn(None)
self.db_api.block_device_mapping_update(elevated_context, 3,
{'connection_info': jsonutils.dumps(expected_conn_info)})
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver, self.db_api)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-2'}
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
self.db_api.block_device_mapping_update(self.context, 4,
{'connection_info': jsonutils.dumps(expected_conn_info)})
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver,
self.db_api)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
snapshot = {'id': 'fake-snapshot-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
self.db_api.block_device_mapping_update(
self.context, 4, {'volume_id': 'fake-volume-id-2'}).AndReturn(None)
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
self.mox.StubOutWithMock(self.db_api,
'block_device_mapping_update')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
self.db_api.block_device_mapping_update(
self.context, 5, {'volume_id': 'fake-volume-id-2'}).AndReturn(None)
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
self.mox.StubOutWithMock(self.db_api,
'block_device_mapping_update')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertEqual(no_swap, driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
| apache-2.0 |
eerwitt/tensorflow | tensorflow/contrib/ndlstm/python/__init__.py | 135 | 1103 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all ndlstm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member
from tensorflow.contrib.ndlstm.python.lstm1d import *
from tensorflow.contrib.ndlstm.python.lstm2d import *
from tensorflow.contrib.ndlstm.python.misc import *
# pylint: enable=wildcard-import
| apache-2.0 |
tudarmstadt-lt/topicrawler | lt.lm/src/main/py/mr_ngram_count.py | 1 | 1297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test:
cat data | map | sort | reduce
cat data | ./x.py -m | sort | ./x.py -r
hadoop jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar \
-files x.py \
-mapper 'x.py -m' \
-reducer 'x.py -r' \
-input in \
-output out
@author: stevo
"""
from __future__ import print_function
from __future__ import division
import itertools as it
import sys
def readlines():
with sys.stdin as f:
for line in f:
if line.strip():
yield line
def mapper(lines):
for line in lines:
print('{}'.format(line.rstrip()))
def line2tuple(lines):
for line in lines:
splits = line.rstrip().split('\t')
yield splits
def reducer(lines, mincount=1):
for key, values in it.groupby(lines, lambda line : line.rstrip()):
num = reduce(lambda x, y: x + 1, values, 0)
if num >= mincount:
print('{}\t{}'.format(key, num))
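# Illustrative run (input data is made up; the script name follows the
# docstring's ./x.py convention):
#   printf 'a b\na b\nb c\n' | ./x.py -m | sort | ./x.py -r
# would print "a b\t2" and "b c\t1" with the default mincount of 1.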
if len(sys.argv) < 2:
raise Exception('specify mapper (-m) or reducer (-r) function')
t = sys.argv[1]
mincount = int(sys.argv[2]) if len(sys.argv) > 2 else 1
if '-m' == t:
mapper(readlines());
elif '-r' == t:
reducer(readlines(), mincount);
else:
raise Exception('specify mapper (-m) or reducer (-r) function') | apache-2.0 |
Brocade-OpenSource/OpenStack-DNRM-Nova | nova/api/openstack/compute/contrib/migrations.py | 3 | 2622 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
XMLNS = "http://docs.openstack.org/compute/ext/migrations/api/v2.0"
ALIAS = "os-migrations"
def authorize(context, action_name):
action = 'migrations:%s' % action_name
extensions.extension_authorizer('compute', action)(context)
class MigrationsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('migrations')
elem = xmlutil.SubTemplateElement(root, 'migration',
selector='migrations')
elem.set('id')
elem.set('source_node')
elem.set('dest_node')
elem.set('source_compute')
elem.set('dest_compute')
elem.set('dest_host')
elem.set('status')
elem.set('instance_uuid')
elem.set('old_instance_type_id')
elem.set('new_instance_type_id')
elem.set('created_at')
elem.set('updated_at')
return xmlutil.MasterTemplate(root, 1)
class MigrationsController(object):
"""Controller for accessing migrations in OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@wsgi.serializers(xml=MigrationsTemplate)
def index(self, req):
"""Return all migrations in progress."""
context = req.environ['nova.context']
authorize(context, "index")
migrations = self.compute_api.get_migrations(context, req.GET)
return {'migrations': migrations}
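# Illustrative request/response shape (assumed for reference only; the exact
# URL prefix depends on the deployment):
#   GET /v2/<tenant_id>/os-migrations
#   -> {"migrations": [{"id": ..., "instance_uuid": ..., "status": ..., ...}]}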
class Migrations(extensions.ExtensionDescriptor):
"""Provide data on migrations."""
name = "Migrations"
alias = ALIAS
namespace = XMLNS
updated = "2013-05-30T00:00:00+00:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-migrations',
MigrationsController())
resources.append(resource)
return resources
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_linkedin.py | 92 | 1050 | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test, OAuth2Test
class BaseLinkedinTest(object):
user_data_url = 'https://api.linkedin.com/v1/people/~:' \
'(first-name,id,last-name)'
expected_username = 'FooBar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'lastName': 'Bar',
'id': '1010101010',
'firstName': 'Foo'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class LinkedinOAuth1Test(BaseLinkedinTest, OAuth1Test):
backend_path = 'social.backends.linkedin.LinkedinOAuth'
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
class LinkedinOAuth2Test(BaseLinkedinTest, OAuth2Test):
backend_path = 'social.backends.linkedin.LinkedinOAuth2'
| agpl-3.0 |
ArcaniteSolutions/truffe2 | truffe2/truffe/management/commands/import_ndfs.py | 2 | 4367 | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from accounting_core.models import CostCenter, AccountingYear, Account
from accounting_tools.models import ExpenseClaim, ExpenseClaimFile, ExpenseClaimLine, ExpenseClaimLogging, LinkedInfo
from app.ldaputils import get_attrs_of_sciper
from users.models import TruffeUser
import json
import os
import sys
class Command(BaseCommand):
""" Requirements : files in /media/uploads/_generic/ExpenseClaim/"""
help = 'Import notes de frais'
def handle(self, *args, **options):
data = json.loads(sys.stdin.read())
root_user = TruffeUser.objects.get(username=179189)
expenseclaim_ct = ContentType.objects.get(app_label="accounting_tools", model="expenseclaim")
status_mapping = {'1': '0_draft', '2': '2_agep_validable', '3': '4_archived'}
for ndf_data in data['data']:
try:
ay = AccountingYear.objects.get(name=ndf_data['accounting_year__name'])
except:
print u"AccountingYear not found !!", ndf_data['accounting_year__name']
ay = None
if ay:
try:
costcenter = CostCenter.objects.get(account_number=ndf_data['costcenter__account_number'], accounting_year=ay)
except:
print u"CostCenter not found !!", ndf_data['costcenter__account_number']
costcenter = None
if costcenter:
try:
user = TruffeUser.objects.get(username=ndf_data['creator_username'])
except TruffeUser.DoesNotExist:
print "Creation of user {!r}".format(ndf_data['creator_username'])
user = TruffeUser(username=ndf_data['creator_username'], is_active=True)
user.last_name, user.first_name, user.email = get_attrs_of_sciper(ndf_data['creator_username'])
user.save()
except Exception as e:
print "user is root_user", e
user = root_user
ndf, created = ExpenseClaim.objects.get_or_create(costcenter=costcenter, accounting_year=ay, user=user, status=status_mapping[ndf_data['status']],
comment=ndf_data['commentaire'], name=ndf_data['name'], nb_proofs=ndf_data['nb_just'])
if created:
ExpenseClaimLogging(who=user, what='imported', object=ndf).save()
print "+ {!r}".format(ndf.name)
if ndf_data['linked_info']:
linked, created = LinkedInfo.objects.get_or_create(object_id=ndf.pk, content_type=expenseclaim_ct, user_pk=user.pk, **ndf_data['linked_info'])
if created:
print " (I) {!r} {!r}".format(linked.first_name, linked.last_name)
for line_data in ndf_data['lines']:
account = Account.objects.get(account_number=line_data['account__account_number'], accounting_year=ay)
__, created = ExpenseClaimLine.objects.get_or_create(expense_claim=ndf, label=line_data['name'], account=account, proof=line_data['just'],
order=line_data['order'], value=line_data['amount'], value_ttc=line_data['amount'], tva=0)
if created:
print " (+) {!r}".format(line_data['name'])
for file_data in ndf_data['uploads']:
if not os.path.isfile(os.path.join('media', 'uploads', '_generic', 'ExpenseClaim', file_data.split('/')[-1])):
print " (!) Missing file {}".format(file_data)
else:
__, created = ExpenseClaimFile.objects.get_or_create(uploader=user, object=ndf, file=os.path.join('uploads', '_generic', 'ExpenseClaim', file_data.split('/')[-1]), defaults={'upload_date': now()})
if created:
print " (L) {!r}".format(file_data)
| bsd-2-clause |
pcsforeducation/incrowd | incrowd/notify/utils.py | 2 | 1296 | from __future__ import unicode_literals
import logging
from notify.models import Notification
logger = logging.getLogger(__name__)
def ping_filter(message, users, sending_user, notify_text, notify_type,
notify_id=None):
for user in users:
if username_in_message(message, user.username):
# Create notification
if user == sending_user:
continue
note = Notification(
text='{} {}: {}'.format(
sending_user.username, notify_text, message),
user=user,
from_user=sending_user,
type=notify_type,
identifier=notify_id)
note.save()
logger.info("Created notification for user {} from {}"
.format(note.user, note.from_user))
return message
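# Illustrative call (names are placeholders, not defined in this module):
#   ping_filter('hey @bob', users, alice, 'mentioned you in', 'chat')
# creates one Notification for the user named "bob" (and none for the sender),
# with text 'alice mentioned you in: hey @bob'.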
def username_in_message(message, username):
message = message.lower()
username = username.lower()
# Check if @username in message. Edge case for username at the end of
# the message.
if '@' + username + ' ' in message.lower():
return True
try:
return (message.index('@' + username) ==
len(message.lower()) - len('@' + username))
except ValueError:
return False
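# Expected behaviour, for illustration (not part of the original module):
#   username_in_message('hi @alice how are you', 'alice') -> True
#   username_in_message('ping @alice', 'alice')           -> True  (mention at end)
#   username_in_message('hi alice', 'alice')              -> False (no @-mention)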
| apache-2.0 |
Ziqi-Li/bknqgis | bokeh/bokeh/sphinxext/example_handler.py | 1 | 2905 | import sys
from ..application.handlers.code_runner import CodeRunner
from ..application.handlers.handler import Handler
from ..io import set_curdoc, curdoc
class ExampleHandler(Handler):
""" A stripped-down handler similar to CodeHandler but that does
some appropriate monkeypatching to
"""
_output_funcs = ['output_notebook', 'output_file', 'reset_output']
_io_funcs = ['show', 'save']
def __init__(self, source, filename):
super(ExampleHandler, self).__init__(self)
self._runner = CodeRunner(source, filename, [])
def modify_document(self, doc):
if self.failed:
return
module = self._runner.new_module()
sys.modules[module.__name__] = module
doc._modules.append(module)
old_curdoc = curdoc()
set_curdoc(doc)
# Keep the patched-out objects separate from the previously current document
# so that both can be restored correctly afterwards.
old_io, old_doc_class = self._monkeypatch()
try:
self._runner.run(module, lambda: None)
finally:
self._unmonkeypatch(old_io, old_doc_class)
set_curdoc(old_curdoc)
def _monkeypatch(self):
def _pass(*args, **kw): pass
def _add_root(obj, *args, **kw):
from bokeh.io import curdoc
curdoc().add_root(obj)
def _curdoc(*args, **kw):
return curdoc()
# these functions are transitively imported from io into plotting,
# so we have to patch them all. Assumption is that no other patching
# has occurred, i.e. we can just save the funcs being patched once,
# from io, and use those as the originals to replace everywhere
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
old_io = {}
for f in self._output_funcs + self._io_funcs:
old_io[f] = getattr(io, f)
for mod in mods:
for f in self._output_funcs:
setattr(mod, f, _pass)
for f in self._io_funcs:
setattr(mod, f, _add_root)
import bokeh.document as d
old_doc = d.Document
d.Document = _curdoc
return old_io, old_doc
def _unmonkeypatch(self, old_io, old_doc):
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
for mod in mods:
for f in old_io:
setattr(mod, f, old_io[f])
import bokeh.document as d
d.Document = old_doc
@property
def failed(self):
return self._runner.failed
@property
def error(self):
return self._runner.error
@property
def error_detail(self):
return self._runner.error_detail
| gpl-2.0 |
BurningNetel/ctf-manager | CTFmanager/tests/views/event/test_event.py | 1 | 6138 | import json
from django.core.urlresolvers import reverse
from CTFmanager.tests.views.base import ViewTestCase
class EventPageAJAXJoinEventTest(ViewTestCase):
""" Tests that a user can join an event
A user should be able to join upcoming events.
And get a response without the page reloading
"""
def get_valid_event_join_post(self):
event = self.create_event()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
return _json, event
def test_POST_returns_expected_json_on_valid_post(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(200, _json['status_code'])
def test_POST_gives_correct_user_count(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(1, _json['members'])
def test_logout_POST_gives_401_and_negative(self):
self.client.logout()
_json, event = self.get_valid_event_join_post()
self.assertEqual(-1, _json['members'])
self.assertEqual(401, _json['status_code'])
def test_duplicate_POST_gives_304_and_negative(self):
_json, event = self.get_valid_event_join_post()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(-1, _json['members'])
self.assertEqual(304, _json['status_code'])
def test_valid_DELETE_gives_valid_json(self):
event = self.create_event_join_user()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(200, _json['status_code'])
self.assertEqual(0, _json['members'])
def test_duplicate_DELETE_gives_304_and_negative(self):
event = self.create_event_join_user()
self.client.delete(reverse('event_join', args=[event.name]))
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(304, _json['status_code'])
self.assertEqual(-1, _json['members'])
def test_logout_then_DELETE_gives_401_and_negative(self):
event = self.create_event_join_user()
self.client.logout()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(401, _json['status_code'])
self.assertEqual(-1, _json['members'])
def create_event_join_user(self):
event = self.create_event()
event.join(self.user)
return event
class EventPageTest(ViewTestCase):
def test_events_page_requires_authentication(self):
self.client.logout()
response = self.client.get(reverse('events'))
self.assertRedirects(response, reverse('login') + '?next=' + reverse('events'))
def test_events_page_renders_events_template(self):
response = self.client.get(reverse('events'))
self.assertTemplateUsed(response, 'event/events.html')
def test_events_page_contains_new_event_button(self):
response = self.client.get(reverse('events'))
expected = 'id="btn_add_event" href="/events/new/">Add Event</a>'
self.assertContains(response, expected)
def test_events_page_displays_only_upcoming_events(self):
event_future = self.create_event("hatCTF", True)
event_past = self.create_event("RuCTF_2015", False)
response = self.client.get(reverse('events'))
_event = response.context['events']
self.assertEqual(_event[0], event_future)
self.assertEqual(len(_event), 1)
self.assertNotEqual(_event[0], event_past)
def test_events_page_has_correct_headers(self):
response = self.client.get(reverse('events'))
expected = 'Upcoming Events'
expected2 = 'Archive'
self.assertContains(response, expected)
self.assertContains(response, expected2)
def test_empty_events_set_shows_correct_message(self):
response = self.client.get(reverse('events'))
expected = 'No upcoming events!'
self.assertContains(response, expected)
def test_events_page_display_archive(self):
event_past = self.create_event('past_event', False)
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertContains(response, '<table id="table_archive"')
self.assertContains(response, event_past.name)
self.assertEqual(archive[0], event_past)
def test_events_page_displays_error_message_when_nothing_in_archive(self):
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertEqual(len(archive), 0)
self.assertContains(response, 'No past events!')
def test_event_page_displays_event_members_count(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, '0 Participating')
event.members.add(self.user)
event.save()
response = self.client.get(reverse('events'))
self.assertContains(response, '1 Participating')
def test_event_page_displays_correct_button_text(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, 'Join</button>')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, 'Leave</button>')
def test_event_page_shows_username_in_popup(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 1)
self.assertContains(response, 'Nobody has joined yet!')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 2)
self.assertNotContains(response, 'Nobody has joined yet!') | gpl-3.0 |
crchemist/scioncc | src/pyon/core/test/test_thread.py | 2 | 3210 | #!/usr/bin/env python
__author__ = 'Adam R. Smith'
from pyon.core.thread import PyonThreadManager, PyonThread
from pyon.core.exception import ContainerError
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from unittest import SkipTest
from nose.plugins.attrib import attr
import time
@attr('UNIT', group='process')
class ProcessTest(PyonTestCase):
def setUp(self):
self.counter = 0
def increment(self, amount=1):
self.counter += amount
def test_proc(self):
self.counter = 0
proc = PyonThread(self.increment, 2)
proc.start()
self.assertEqual(self.counter, 0)
time.sleep(0.2)
proc.join()
self.assertGreaterEqual(self.counter, 2)
def test_supervisor(self):
self.counter = 0
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(self.increment, amount=2)
self.assertEqual(self.counter, 0)
time.sleep(0.2)
sup.join_children()
self.assertGreaterEqual(self.counter, 2)
def test_supervisor_shutdown(self):
""" Test shutdown joining/forcing with timeouts. """
sup = PyonThreadManager()
sup.start()
import gevent
#Note: commented MM 7/2015. time.sleep seems not monkey-patched on Ubuntu?
#self.assertIs(time.sleep, gevent.hub.sleep)
# Test that it takes at least the given timeout to join_children, but not much more
proc_sleep_secs, proc_count = 0.01, 5
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown(2*proc_sleep_secs)
# MM, 1/12: Ok, I loosened the timing boundaries. Do the tests still work?
# Enabled 0.2s of slack for all tests
self.assertLess(elapsed - proc_sleep_secs, 0.2)
# this could be trouble
self.assertLess(elapsed, 0.2 + proc_sleep_secs*3)
# Test that a small timeout forcibly shuts down without waiting
wait_secs = 0.0001
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown(wait_secs)
self.assertLess(elapsed - wait_secs, 0.2)
# this could be trouble too
self.assertLess(elapsed, 0.2 + proc_sleep_secs)
# Test that no timeout waits until all finished
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown()
self.assertLess(elapsed - proc_sleep_secs, 0.2)
def test_ensure_ready(self):
# GreenProcess by default will signal ready immediately, but we can still pass it through to make sure it's ok
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(self.increment, amount=5)
sup.ensure_ready(proc)
self.assertEqual(self.counter, 5)
def test_ensure_ready_failed_proc(self):
# yes the error we print is intentional and annoying, sorry
def failboat():
self.increment(5, 1) # too many params, will fail
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(failboat)
self.assertRaises(ContainerError, sup.ensure_ready, proc)
| bsd-2-clause |
sriprasanna/django-1.3.1 | django/utils/unittest/result.py | 570 | 6105 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_moduleSetUpFailed = False
def __init__(self):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return (len(self.failures) + len(self.errors) == 0)
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
| bsd-3-clause |
sodafree/backend | build/lib.linux-i686-2.7/django/template/loaders/eggs.py | 103 | 1038 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
        Loads templates from Python eggs via pkg_resources.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
                except Exception:
                    # The app may not be packaged as an egg, or may not ship
                    # this template; fall through and try the next app.
                    pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
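# A minimal configuration sketch: in Django releases of this vintage the egg
# loader is enabled through the TEMPLATE_LOADERS setting of a project's
# settings module (the filesystem loader below is just the usual fallback).
#
#     TEMPLATE_LOADERS = (
#         'django.template.loaders.eggs.Loader',
#         'django.template.loaders.filesystem.Loader',
#     )
#
# With that in place, loading a template such as 'myapp/index.html' (a
# hypothetical name) asks each app in INSTALLED_APPS for the packaged
# resource 'templates/myapp/index.html'.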
| bsd-3-clause |
Allow2CEO/browser-ios | brave/node_modules/bloom-filter-cpp/vendor/depot_tools/third_party/gsutil/pkg_util.py | 51 | 2026 | #!/usr/bin/env python
# Utilities to facilitate maintaining one master list of package contents
# in MANIFEST.in and allow us to import that list into various packaging
# tools (e.g. rpmbuild and setup.py).
# Define the file in which we maintain package contents. Rather than
# hard-coding our package contents, to ease maintenance we read the
# manifest file to obtain the list of files and directories to include.
MANIFEST_IN = 'MANIFEST.in'
# Define input and output files for customizing the rpm package spec.
SPEC_IN = 'gsutil.spec.in'
SPEC_OUT = 'gsutil.spec'
# Root of rpmbuild tree for file enumeration in gsutil.spec file.
RPM_ROOT = '%{_datadir}/%{name}/'
def parse_manifest(files, dirs):
'''Parse contents of manifest file and append results to passed lists
of files and directories.
'''
f = open(MANIFEST_IN, 'r')
for line in f:
line = line.strip()
# Skip empty or comment lines.
if (len(line) <= 0) or (line[0] == '#'):
continue
tokens = line.split()
    if len(tokens) > 0:
if tokens[0] == 'include':
files.extend(tokens[1:])
elif tokens[0] == 'recursive-include' and tokens[2] == '*':
dirs.append(tokens[1])
else:
err = 'Unsupported type ' + tokens[0] + ' in ' + MANIFEST_IN + ' file.'
raise Exception(err)
f.close()
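# A worked example of the parsing rules above. Given a hypothetical
# MANIFEST.in containing:
#
#     include README.md LICENSE
#     recursive-include gslib *
#
# the call
#
#     files, dirs = [], []
#     parse_manifest(files, dirs)
#
# leaves files == ['README.md', 'LICENSE'] and dirs == ['gslib'].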
# When executed as a separate script, create a dynamically generated rpm
# spec file. Otherwise, when loaded as a module by another script, no
# specific actions are taken, other than making utility functions available
# to the loading script.
if __name__ == '__main__':
# Running as main so generate a new rpm spec file.
files = []
dirs = []
parse_manifest(files, dirs)
fin = open(SPEC_IN, 'r')
fout = open(SPEC_OUT, 'w')
for line in fin:
if line.strip() == '###FILES_GO_HERE###':
for file in files:
fout.write(RPM_ROOT + file + '\n')
for dir in dirs:
fout.write(RPM_ROOT + dir + '/\n')
else:
fout.write(line)
fout.close()
fin.close()
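# A sketch of the substitution performed above, assuming a hypothetical
# gsutil.spec.in fragment and the files/dirs from the parse_manifest example:
#
#     %files
#     ###FILES_GO_HERE###
#
# would be written to gsutil.spec as:
#
#     %files
#     %{_datadir}/%{name}/README.md
#     %{_datadir}/%{name}/LICENSE
#     %{_datadir}/%{name}/gslib/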
| mpl-2.0 |