repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
leppa/home-assistant | homeassistant/components/push/camera.py | 3 | 5616 | """Camera platform that receives images through HTTP POST."""
import asyncio
from collections import deque
from datetime import timedelta
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.camera import (
PLATFORM_SCHEMA,
STATE_IDLE,
STATE_RECORDING,
Camera,
)
from homeassistant.components.camera.const import DOMAIN
from homeassistant.const import CONF_NAME, CONF_TIMEOUT, CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BUFFER_SIZE = "buffer"
CONF_IMAGE_FIELD = "field"
DEFAULT_NAME = "Push Camera"
ATTR_FILENAME = "filename"
ATTR_LAST_TRIP = "last_trip"
PUSH_CAMERA_DATA = "push_camera"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_BUFFER_SIZE, default=1): cv.positive_int,
vol.Optional(CONF_TIMEOUT, default=timedelta(seconds=5)): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_IMAGE_FIELD, default="image"): cv.string,
vol.Required(CONF_WEBHOOK_ID): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Push Camera platform."""
    # Shared registry that maps webhook ids to PushCamera entities; the
    # webhook handler uses it to route incoming images.
    hass.data.setdefault(PUSH_CAMERA_DATA, {})
    camera = PushCamera(
        hass,
        config[CONF_NAME],
        config[CONF_BUFFER_SIZE],
        config[CONF_TIMEOUT],
        config[CONF_IMAGE_FIELD],
        config.get(CONF_WEBHOOK_ID),
    )
    async_add_entities([camera])
async def handle_webhook(hass, webhook_id, request):
    """Handle incoming webhook POST with image files."""
    try:
        # Parse the POST body (multipart form data); give the client at most
        # 5 seconds before treating the request as failed.
        with async_timeout.timeout(5):
            data = dict(await request.post())
    except (asyncio.TimeoutError, aiohttp.web.HTTPException) as error:
        _LOGGER.error("Could not get information from POST <%s>", error)
        return
    # Route the upload to the camera entity registered for this webhook id.
    camera = hass.data[PUSH_CAMERA_DATA][webhook_id]
    if camera.image_field not in data:
        _LOGGER.warning("Webhook call without POST parameter <%s>", camera.image_field)
        return
    # data[field] is an aiohttp FileField: pass the raw image bytes and the
    # client-supplied filename to the entity.
    await camera.update_image(
        data[camera.image_field].file.read(), data[camera.image_field].filename
    )
class PushCamera(Camera):
    """The representation of a Push camera.

    Images are pushed in via the webhook handler; the camera reports
    STATE_RECORDING while images keep arriving and falls back to
    STATE_IDLE after ``timeout`` passes without a new image.
    """
    def __init__(self, hass, name, buffer_size, timeout, image_field, webhook_id):
        """Initialize push camera component."""
        super().__init__()
        self._name = name
        self._last_trip = None  # UTC time the current recording burst started
        self._filename = None  # filename of the most recently received image
        self._expired_listener = None  # cancel handle for the idle-reset timer
        self._state = STATE_IDLE
        self._timeout = timeout  # timedelta of inactivity before going idle
        self.queue = deque([], buffer_size)  # newest image at index 0
        self._current_image = None
        self._image_field = image_field  # POST field name carrying the image
        self.webhook_id = webhook_id
        self.webhook_url = hass.components.webhook.async_generate_url(webhook_id)
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        # Register ourselves so handle_webhook() can find this entity.
        self.hass.data[PUSH_CAMERA_DATA][self.webhook_id] = self
        try:
            self.hass.components.webhook.async_register(
                DOMAIN, self.name, self.webhook_id, handle_webhook
            )
        except ValueError:
            # Another integration already claimed this webhook id.
            _LOGGER.error(
                "In <%s>, webhook_id <%s> already used", self.name, self.webhook_id
            )
    @property
    def image_field(self):
        """HTTP field containing the image file."""
        return self._image_field
    @property
    def state(self):
        """Return current state of the camera."""
        return self._state
    async def update_image(self, image, filename):
        """Update the camera image."""
        # First image after an idle period starts a new recording burst.
        if self._state == STATE_IDLE:
            self._state = STATE_RECORDING
            self._last_trip = dt_util.utcnow()
            self.queue.clear()
        self._filename = filename
        self.queue.appendleft(image)
        @callback
        def reset_state(now):
            """Set state to idle after no new images for a period of time."""
            self._state = STATE_IDLE
            self._expired_listener = None
            _LOGGER.debug("Reset state")
            self.async_schedule_update_ha_state()
        # Restart the inactivity timer: cancel any pending reset and schedule
        # a new one `timeout` from now.
        if self._expired_listener:
            self._expired_listener()
        self._expired_listener = async_track_point_in_utc_time(
            self.hass, reset_state, dt_util.utcnow() + self._timeout
        )
        self.async_schedule_update_ha_state()
    async def async_camera_image(self):
        """Return a still image response."""
        if self.queue:
            # While idle, rotate so repeated requests cycle through the
            # buffered images instead of always returning the newest one.
            if self._state == STATE_IDLE:
                self.queue.rotate(1)
            self._current_image = self.queue[0]
        return self._current_image
    @property
    def name(self):
        """Return the name of this camera."""
        return self._name
    @property
    def motion_detection_enabled(self):
        """Camera Motion Detection Status."""
        return False
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Only expose attributes that have been set at least once.
        return {
            name: value
            for name, value in (
                (ATTR_LAST_TRIP, self._last_trip),
                (ATTR_FILENAME, self._filename),
            )
            if value is not None
        }
| apache-2.0 |
hmgaudecker/econ-project-templates | docs/bld/example/r/r_example/.mywaflib/waflib/extras/parallel_debug.py | 16 | 11991 | #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007-2010 (ita)
"""
Debugging helper for parallel compilation, outputs
a file named pdebug.svg in the source directory::
def options(opt):
opt.load('parallel_debug')
def build(bld):
...
"""
import re, sys, threading, time, traceback
try:
from Queue import Queue
except:
from queue import Queue
from waflib import Runner, Options, Task, Logs, Errors
SVG_TEMPLATE = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0"
x="${project.x}" y="${project.y}" width="${project.width}" height="${project.height}" id="svg602" xml:space="preserve">
<style type='text/css' media='screen'>
g.over rect { stroke:#FF0000; fill-opacity:0.4 }
</style>
<script type='text/javascript'><![CDATA[
var svg = document.getElementsByTagName('svg')[0];
svg.addEventListener('mouseover', function(e) {
var g = e.target.parentNode;
var x = document.getElementById('r_' + g.id);
if (x) {
g.setAttribute('class', g.getAttribute('class') + ' over');
x.setAttribute('class', x.getAttribute('class') + ' over');
showInfo(e, g.id, e.target.attributes.tooltip.value);
}
}, false);
svg.addEventListener('mouseout', function(e) {
var g = e.target.parentNode;
var x = document.getElementById('r_' + g.id);
if (x) {
g.setAttribute('class', g.getAttribute('class').replace(' over', ''));
x.setAttribute('class', x.getAttribute('class').replace(' over', ''));
hideInfo(e);
}
}, false);
function showInfo(evt, txt, details) {
${if project.tooltip}
tooltip = document.getElementById('tooltip');
var t = document.getElementById('tooltiptext');
t.firstChild.data = txt + " " + details;
var x = evt.clientX + 9;
if (x > 250) { x -= t.getComputedTextLength() + 16; }
var y = evt.clientY + 20;
tooltip.setAttribute("transform", "translate(" + x + "," + y + ")");
tooltip.setAttributeNS(null, "visibility", "visible");
var r = document.getElementById('tooltiprect');
r.setAttribute('width', t.getComputedTextLength() + 6);
${endif}
}
function hideInfo(evt) {
var tooltip = document.getElementById('tooltip');
tooltip.setAttributeNS(null,"visibility","hidden");
}
]]></script>
<!-- inkscape requires a big rectangle or it will not export the pictures properly -->
<rect
x='${project.x}' y='${project.y}' width='${project.width}' height='${project.height}'
style="font-size:10;fill:#ffffff;fill-opacity:0.01;fill-rule:evenodd;stroke:#ffffff;"></rect>
${if project.title}
<text x="${project.title_x}" y="${project.title_y}"
style="font-size:15px; text-anchor:middle; font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans">${project.title}</text>
${endif}
${for cls in project.groups}
<g id='${cls.classname}'>
${for rect in cls.rects}
<rect x='${rect.x}' y='${rect.y}' width='${rect.width}' height='${rect.height}' tooltip='${rect.name}' style="font-size:10;fill:${rect.color};fill-rule:evenodd;stroke:#000000;stroke-width:0.4;" />
${endfor}
</g>
${endfor}
${for info in project.infos}
<g id='r_${info.classname}'>
<rect x='${info.x}' y='${info.y}' width='${info.width}' height='${info.height}' style="font-size:10;fill:${info.color};fill-rule:evenodd;stroke:#000000;stroke-width:0.4;" />
<text x="${info.text_x}" y="${info.text_y}"
style="font-size:12px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
>${info.text}</text>
</g>
${endfor}
${if project.tooltip}
<g transform="translate(0,0)" visibility="hidden" id="tooltip">
<rect id="tooltiprect" y="-15" x="-3" width="1" height="20" style="stroke:black;fill:#edefc2;stroke-width:1"/>
<text id="tooltiptext" style="font-family:Arial; font-size:12;fill:black;"> </text>
</g>
${endif}
</svg>
"""
COMPILE_TEMPLATE = '''def f(project):
lst = []
def xml_escape(value):
return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">")
%s
return ''.join(lst)
'''
reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<code>[^}]*?)\})", re.M)
def compile_template(line):
	"""Compile a ``${...}``-style template string into a render function.

	Placeholders matched by :data:`reg_act` are extracted: plain text
	segments become ``lst.append(<literal>)`` statements, while the embedded
	code fragments (``if``/``for``/``else``/``elif`` directives, ``py:``
	statements, ``xml:`` escaped expressions, or bare expressions) are
	translated into the body of a generated function. The assembled source
	is compiled through ``Task.funex`` and the callable is returned; it
	takes one argument (the template model) and returns the rendered string.
	"""
	extr = []
	def repl(match):
		g = match.group
		if g('dollar'):
			return "$"
		elif g('backslash'):
			return "\\"
		elif g('subst'):
			extr.append(g('code'))
			return "<<|@|>>"
		return None
	line2 = reg_act.sub(repl, line)
	params = line2.split('<<|@|>>')
	assert(extr)
	indent = 0
	buf = []
	# NOTE: the original code also had `app = buf.append` here, which was
	# dead - it was immediately shadowed by the def below.
	def app(txt):
		# `indent` is rebound in the loop below; the closure reads the
		# current value at call time.
		buf.append(indent * '\t' + txt)
	for x in range(len(extr)):
		if params[x]:
			app("lst.append(%r)" % params[x])
		f = extr[x]
		if f.startswith(('if', 'for')):
			app(f + ':')
			indent += 1
		elif f.startswith('py:'):
			app(f[3:])
		elif f.startswith(('endif', 'endfor')):
			indent -= 1
		elif f.startswith(('else', 'elif')):
			indent -= 1
			app(f + ':')
			indent += 1
		elif f.startswith('xml:'):
			app('lst.append(xml_escape(%s))' % f[4:])
		else:
			app('lst.append(str(%s))' % f)
	if extr:
		if params[-1]:
			app("lst.append(%r)" % params[-1])
	fun = COMPILE_TEMPLATE % "\n\t".join(buf)
	# uncomment the following to debug the template
	#for i, x in enumerate(fun.splitlines()):
	#	print i, x
	return Task.funex(fun)
# red #ff4d4d
# green #4da74d
# lila #a751ff
color2code = {
'GREEN' : '#4da74d',
'YELLOW' : '#fefe44',
'PINK' : '#a751ff',
'RED' : '#cc1d1d',
'BLUE' : '#6687bb',
'CYAN' : '#34e2e2',
}
mp = {}
info = [] # list of (text,color)
def map_to_color(name):
	"""Return the hex color used for drawing tasks of class *name*.

	Direct overrides in ``mp`` win; otherwise the task class's declared
	color is looked up (again honoring ``mp``), falling back to RED for
	unknown names or colors.
	"""
	if name in mp:
		return mp[name]
	try:
		cls = Task.classes[name]
	except KeyError:
		return color2code['RED']
	if cls.color in mp:
		return mp[cls.color]
	return color2code.get(cls.color, color2code['RED'])
def process(self):
	"""Replacement for Task.Task.process that records scheduling events.

	Mirrors the stock run/post_run logic, but brackets the execution with
	set_running(+1)/set_running(-1) so the SVG timeline can be drawn.
	"""
	m = self.generator.bld.producer
	try:
		# TODO another place for this?
		del self.generator.bld.task_sigs[self.uid()]
	except KeyError:
		pass
	self.generator.bld.producer.set_running(1, self)
	try:
		ret = self.run()
	except Exception:
		self.err_msg = traceback.format_exc()
		self.hasrun = Task.EXCEPTION
		# TODO cleanup
		m.error_handler(self)
		return
	if ret:
		# non-zero return code from the task body
		self.err_code = ret
		self.hasrun = Task.CRASHED
	else:
		try:
			self.post_run()
		except Errors.WafError:
			pass
		except Exception:
			self.err_msg = traceback.format_exc()
			self.hasrun = Task.EXCEPTION
		else:
			self.hasrun = Task.SUCCESS
	if self.hasrun != Task.SUCCESS:
		m.error_handler(self)
	# NOTE(review): on the early `return` in the except branch above, the
	# matching set_running(-1, ...) is never emitted - confirm intended.
	self.generator.bld.producer.set_running(-1, self)
# monkey-patch: keep the original as process_back and install our wrapper
Task.Task.process_back = Task.Task.process
Task.Task.process = process
# keep a reference to the original so the patched version can delegate
old_start = Runner.Parallel.start
def do_start(self):
	"""Replacement for Runner.Parallel.start: collect task info, then draw."""
	try:
		# the --dband option only exists if options(opt) loaded this tool
		Options.options.dband
	except AttributeError:
		self.bld.fatal('use def options(opt): opt.load("parallel_debug")!')
	self.taskinfo = Queue()
	old_start(self)
	if self.dirty:
		make_picture(self)
Runner.Parallel.start = do_start
# protects lock_cache, which is shared by all worker threads
lock_running = threading.Lock()
def set_running(self, by, tsk):
	"""Record that *tsk* started (by=1) or finished (by=-1) on a worker slot.

	A cache maps running tasks to slot indices so each task is drawn on a
	consistent horizontal band; every transition is pushed onto the
	producer's taskinfo queue for make_picture() to consume.
	"""
	with lock_running:
		try:
			cache = self.lock_cache
		except AttributeError:
			cache = self.lock_cache = {}
		i = 0
		if by > 0:
			# pick the first slot index not currently occupied by a task
			vals = cache.values()
			for i in range(self.numjobs):
				if i not in vals:
					cache[tsk] = i
					break
		else:
			# task finished: reuse its recorded slot and free it
			i = cache[tsk]
			del cache[tsk]
		self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by, ",".join(map(str, tsk.outputs))) )
Runner.Parallel.set_running = set_running
def name2class(name):
	"""Mangle a task-class name into an identifier usable as an SVG/CSS id."""
	return re.sub(r'[ .]', '_', name)
def make_picture(producer):
	"""Drain the producer's taskinfo queue and render pdebug.svg/pdebug.dat.

	Each queue tuple is (slot, task id, timestamp, class name, processed,
	count, +/-1, outputs). Timestamps are normalized to the first event,
	slot/task ids are compacted to small integers, and each task becomes a
	rectangle on its worker's band.
	"""
	# first, cast the parameters
	if not hasattr(producer.bld, 'path'):
		return
	tmp = []
	try:
		# drain the queue without blocking; Empty ends the loop
		while True:
			tup = producer.taskinfo.get(False)
			tmp.append(list(tup))
	except:
		pass
	try:
		ini = float(tmp[0][2])
	except:
		# no events recorded: nothing to draw
		return
	if not info:
		# build the (class name, color) caption list once per process
		seen = []
		for x in tmp:
			name = x[3]
			if not name in seen:
				seen.append(name)
			else:
				continue
			info.append((name, map_to_color(name)))
		info.sort(key=lambda x: x[0])
	thread_count = 0
	acc = []
	for x in tmp:
		thread_count += x[6]
		acc.append("%d %d %f %r %d %d %d %s" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count, x[7]))
	# raw event dump, useful for post-processing with other tools
	data_node = producer.bld.path.make_node('pdebug.dat')
	data_node.write('\n'.join(acc))
	tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp]
	# compact the slot numbers to 0..N-1
	st = {}
	for l in tmp:
		if not l[0] in st:
			st[l[0]] = len(st.keys())
	tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ]
	THREAD_AMOUNT = len(st.keys())
	# compact the task ids to 0..M-1
	st = {}
	for l in tmp:
		if not l[1] in st:
			st[l[1]] = len(st.keys())
	tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ]
	BAND = Options.options.dband
	seen = {}
	acc = []
	for x in range(len(tmp)):
		line = tmp[x]
		# `id` shadows the builtin here; kept as-is
		id = line[1]
		if id in seen:
			continue
		seen[id] = True
		begin = line[2]
		thread_id = line[0]
		# pair each start event with the task's next event to get its end time
		for y in range(x + 1, len(tmp)):
			line = tmp[y]
			if line[1] == id:
				end = line[2]
				#print id, thread_id, begin, end
				#acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) )
				acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3], line[7]) )
				break
	if Options.options.dmaxtime < 0.1:
		# auto-scale: width follows the latest recorded event
		gwidth = 1
		for x in tmp:
			m = BAND * x[2]
			if m > gwidth:
				gwidth = m
	else:
		gwidth = BAND * Options.options.dmaxtime
	ratio = float(Options.options.dwidth) / gwidth
	gwidth = Options.options.dwidth
	gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5)
	# simple data model for our template
	class tobject(object):
		pass
	model = tobject()
	model.x = 0
	model.y = 0
	model.width = gwidth + 4
	model.height = gheight + 4
	model.tooltip = not Options.options.dnotooltip
	model.title = Options.options.dtitle
	model.title_x = gwidth / 2
	model.title_y = gheight + - 5
	groups = {}
	for (x, y, w, h, clsname, name) in acc:
		try:
			groups[clsname].append((x, y, w, h, name))
		except:
			groups[clsname] = [(x, y, w, h, name)]
	# groups of rectangles (else js highlighting is slow)
	model.groups = []
	for cls in groups:
		g = tobject()
		model.groups.append(g)
		g.classname = name2class(cls)
		g.rects = []
		for (x, y, w, h, name) in groups[cls]:
			r = tobject()
			g.rects.append(r)
			r.x = 2 + x * ratio
			r.y = 2 + y
			r.width = w * ratio
			r.height = h
			r.name = name
			r.color = map_to_color(cls)
	cnt = THREAD_AMOUNT
	# caption
	model.infos = []
	for (text, color) in info:
		inf = tobject()
		model.infos.append(inf)
		inf.classname = name2class(text)
		inf.x = 2 + BAND
		inf.y = 5 + (cnt + 0.5) * BAND
		inf.width = BAND/2
		inf.height = BAND/2
		inf.color = color
		inf.text = text
		inf.text_x = 2 + 2 * BAND
		inf.text_y = 5 + (cnt + 0.5) * BAND + 10
		cnt += 1
	# write the file...
	template1 = compile_template(SVG_TEMPLATE)
	txt = template1(model)
	node = producer.bld.path.make_node('pdebug.svg')
	node.write(txt)
	Logs.warn('Created the diagram %r', node)
def options(opt):
	"""Register the --d* command-line flags that tune the SVG output."""
	opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv),
		help='title for the svg diagram', dest='dtitle')
	opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=800, dest='dwidth')
	opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime')
	opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband')
	opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime')
	opt.add_option('--dnotooltip', action='store_true', help='disable tooltips', default=False, dest='dnotooltip')
| bsd-3-clause |
qedi-r/home-assistant | homeassistant/components/brunt/cover.py | 1 | 5303 | """Support for Brunt Blind Engine covers."""
import logging
from brunt import BruntAPI
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverDevice,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
COVER_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
DEVICE_CLASS = "window"
ATTR_REQUEST_POSITION = "request_position"
NOTIFICATION_ID = "brunt_notification"
NOTIFICATION_TITLE = "Brunt Cover Setup"
ATTRIBUTION = "Based on an unofficial Brunt SDK."
CLOSED_POSITION = 0
OPEN_POSITION = 100
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the brunt platform."""
    # pylint: disable=no-name-in-module
    username = config[CONF_USERNAME]
    password = config[CONF_PASSWORD]
    bapi = BruntAPI(username=username, password=password)
    try:
        things = bapi.getThings()["things"]
        if not things:
            _LOGGER.error("No things present in account.")
        else:
            # Second argument True requests an immediate state update for
            # each newly added entity.
            add_entities(
                [
                    BruntDevice(bapi, thing["NAME"], thing["thingUri"])
                    for thing in things
                ],
                True,
            )
    except (TypeError, KeyError, NameError, ValueError) as ex:
        # Credentials/API failures surface here; tell the user via a
        # persistent notification since setup runs non-interactively.
        _LOGGER.error("%s", ex)
        hass.components.persistent_notification.create(
            "Error: {}<br />"
            "You will need to restart hass after fixing."
            "".format(ex),
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
class BruntDevice(CoverDevice):
    """
    Representation of a Brunt cover device.
    Contains the common logic for all Brunt devices.
    """
    def __init__(self, bapi, name, thing_uri):
        """Init the Brunt device."""
        self._bapi = bapi  # shared BruntAPI client
        self._name = name
        self._thing_uri = thing_uri  # identifies this device in API calls
        self._state = {}  # last raw state dict returned by the API
        self._available = None
    @property
    def name(self):
        """Return the name of the device as reported by tellcore."""
        return self._name
    @property
    def available(self):
        """Could the device be accessed during the last update call."""
        return self._available
    @property
    def current_cover_position(self):
        """
        Return current position of cover.
        None is unknown, 0 is closed, 100 is fully open.
        """
        pos = self._state.get("currentPosition")
        # `if pos` would treat the valid position 0 (fully closed) as
        # unknown; compare against None explicitly.
        return int(pos) if pos is not None else None
    @property
    def request_cover_position(self):
        """
        Return request position of cover.
        The request position is the position of the last request
        to Brunt, at times there is a diff of 1 to current
        None is unknown, 0 is closed, 100 is fully open.
        """
        pos = self._state.get("requestPosition")
        # 0 is a valid requested position; only None means unknown.
        return int(pos) if pos is not None else None
    @property
    def move_state(self):
        """
        Return current moving state of cover.
        None is unknown, 0 when stopped, 1 when opening, 2 when closing
        """
        mov = self._state.get("moveState")
        # 0 ("stopped") is a valid state; only None means unknown.
        return int(mov) if mov is not None else None
    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self.move_state == 1
    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self.move_state == 2
    @property
    def device_state_attributes(self):
        """Return the detailed device state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            ATTR_REQUEST_POSITION: self.request_cover_position,
        }
    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return DEVICE_CLASS
    @property
    def supported_features(self):
        """Flag supported features."""
        return COVER_FEATURES
    @property
    def is_closed(self):
        """Return true if cover is closed, else False."""
        return self.current_cover_position == CLOSED_POSITION
    def update(self):
        """Poll the current state of the device."""
        try:
            self._state = self._bapi.getState(thingUri=self._thing_uri).get("thing")
            self._available = True
        except (TypeError, KeyError, NameError, ValueError) as ex:
            # Mark unavailable but keep the last known state dict.
            _LOGGER.error("%s", ex)
            self._available = False
    def open_cover(self, **kwargs):
        """Set the cover to the open position."""
        self._bapi.changeRequestPosition(OPEN_POSITION, thingUri=self._thing_uri)
    def close_cover(self, **kwargs):
        """Set the cover to the closed position."""
        self._bapi.changeRequestPosition(CLOSED_POSITION, thingUri=self._thing_uri)
    def set_cover_position(self, **kwargs):
        """Set the cover to a specific position."""
        self._bapi.changeRequestPosition(
            kwargs[ATTR_POSITION], thingUri=self._thing_uri
        )
| apache-2.0 |
adamchainz/ansible | lib/ansible/modules/monitoring/boundary_meter.py | 66 | 8559 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add boundary meters.
(c) 2013, curtis <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: boundary_meter
short_description: Manage boundary meters
description:
- This module manages boundary meters
version_added: "1.3"
author: "curtis (@ccollicutt)"
requirements:
- Boundary API access
- bprobe is required to send data, but not to register a meter
options:
name:
description:
- meter name
required: true
state:
description:
- Whether to create or remove the client from boundary
required: false
default: true
choices: ["present", "absent"]
apiid:
description:
- Organizations boundary API ID
required: true
apikey:
description:
- Organizations boundary API KEY
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
'''
EXAMPLES='''
- name: Create meter
boundary_meter:
apiid: AAAAAA
apikey: BBBBBB
state: present
name: '{{ inventory_hostname }}'
- name: Delete meter
boundary_meter:
apiid: AAAAAA
apikey: BBBBBB
state: absent
name: '{{ inventory_hostname }}'
'''
import base64
import os
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
# "resource" like thing or apikey?
def auth_encode(apikey):
    """Return the Base64 text for an HTTP Basic ``Authorization`` header.

    Fixes two defects in the original: the result of ``str.replace`` was
    discarded (strings are immutable), and on Python 3
    ``base64.standard_b64encode`` requires bytes and returns bytes, which
    would render as ``b'...'`` when interpolated into the header string.
    """
    if not isinstance(apikey, bytes):
        apikey = apikey.encode("utf-8")
    auth = base64.standard_b64encode(apikey).decode("ascii")
    # standard_b64encode never inserts newlines, but strip defensively so the
    # header always stays on a single line.
    return auth.replace("\n", "")
def build_url(name, apiid, action, meter_id=None, cert_type=None):
    """Build the Boundary API URL for *action*.

    Returns None for an unrecognized action, like the original code.
    """
    base = "https://%s/%s" % (api_host, apiid)
    if action == "create":
        return "%s/meters" % base
    if action == "search":
        return "%s/meters?name=%s" % (base, name)
    if action == "certificates":
        return "%s/meters/%s/%s.pem" % (base, meter_id, cert_type)
    if action == "tags":
        return "%s/meters/%s/tags" % (base, meter_id)
    if action == "delete":
        return "%s/meters/%s" % (base, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
    """Issue an authenticated JSON request against the Boundary API.

    Returns the (response, info) pair produced by fetch_url.
    """
    if meter_id is None:
        url = build_url(name, apiid, action)
    elif cert_type is None:
        url = build_url(name, apiid, action, meter_id)
    else:
        url = build_url(name, apiid, action, meter_id, cert_type)
    headers = {
        "Authorization": "Basic %s" % auth_encode(apikey),
        "Content-Type": "application/json",
    }
    return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
    """Create a meter named *name* and fetch its certificate files.

    Exits the module directly (changed=False) when the meter already
    exists; otherwise returns a (rc, message) tuple.
    """
    meters = search_meter(module, name, apiid, apikey)
    if len(meters) > 0:
        # If the meter already exists, do nothing
        module.exit_json(status="Meter " + name + " already exists",changed=False)
    else:
        # If it doesn't exist, create it
        body = '{"name":"' + name + '"}'
        response, info = http_request(module, name, apiid, apikey, data=body, action="create")
        if info['status'] != 200:
            module.fail_json(msg="Failed to connect to api host to create meter")
        # If the config directory doesn't exist, create it
        if not os.path.exists(config_directory):
            try:
                os.makedirs(config_directory)
            except:
                module.fail_json(msg="Could not create " + config_directory)
        # Download both cert files from the api host
        types = ['key', 'cert']
        for cert_type in types:
            try:
                # If we can't open the file it's not there, so we should download it
                # NOTE(review): the opened handle is never closed (leak) and
                # only serves as an existence check - confirm before changing.
                cert_file = open('%s/%s.pem' % (config_directory,cert_type))
            except IOError:
                # Now download the file...
                rc = download_request(module, name, apiid, apikey, cert_type)
                if rc is False:
                    module.fail_json(msg="Download request for " + cert_type + ".pem failed")
        return 0, "Meter " + name + " created"
def search_meter(module, name, apiid, apikey):
    """Return the decoded list of meters matching *name* (may be empty)."""
    resp, meta = http_request(module, name, apiid, apikey, action="search")
    if meta['status'] != 200:
        module.fail_json(msg="Failed to connect to api host to search for meter")
    body = resp.read()
    return json.loads(body)
def get_meter_id(module, name, apiid, apikey):
    """Return the id of the first meter named *name*, or None if absent."""
    # In order to delete or fetch certificates for a meter we need its id.
    matches = search_meter(module, name, apiid, apikey)
    return matches[0]['id'] if matches else None
def delete_meter(module, name, apiid, apikey):
    """Delete the meter named *name* and remove its local certificate files.

    Returns a (rc, message) tuple like create_meter().
    """
    meter_id = get_meter_id(module, name, apiid, apikey)
    if meter_id is None:
        return 1, "Meter does not exist, so can't delete it"
    # The original call referenced the undefined name `action` (NameError)
    # and passed meter_id positionally into the `data` parameter; explicit
    # keywords make the DELETE URL be built correctly.
    response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
    if info['status'] != 200:
        module.fail_json(msg="Failed to delete meter")
    # Each new meter gets a new key.pem and ca.pem file, so they should be deleted
    types = ['cert', 'key']
    for cert_type in types:
        try:
            cert_file = '%s/%s.pem' % (config_directory, cert_type)
            os.remove(cert_file)
        except OSError:
            module.fail_json(msg="Failed to remove " + cert_type + ".pem file")
    return 0, "Meter " + name + " deleted"
def download_request(module, name, apiid, apikey, cert_type):
    """Download the meter's 'key' or 'cert' PEM file into config_directory.

    Returns True on success; calls module.fail_json on any failure.
    """
    meter_id = get_meter_id(module, name, apiid, apikey)
    if meter_id is None:
        module.fail_json(msg="Could not get meter id")
    # The original passed meter_id/cert_type positionally, shifting them into
    # the `data` and `meter_id` parameters, and then tested the undefined
    # name `result` (NameError). Use explicit keywords and the HTTP status.
    response, info = http_request(
        module, name, apiid, apikey, "certificates",
        meter_id=meter_id, cert_type=cert_type
    )
    if info['status'] != 200:
        module.fail_json(msg="Failed to connect to api host to download certificate")
    cert_file_path = '%s/%s.pem' % (config_directory, cert_type)
    try:
        body = response.read()
        cert_file = open(cert_file_path, 'w')
        cert_file.write(body)
        cert_file.close()
        # certificates are credentials: restrict to owner read/write
        os.chmod(cert_file_path, int('0600', 8))
    except Exception:
        module.fail_json(msg="Could not write to certificate file")
    return True
def main():
    """Entry point: create or delete a Boundary meter based on *state*."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=False),
            apikey=dict(required=True, no_log=True),
            apiid=dict(required=True),
            validate_certs=dict(default='yes', type='bool'),
        )
    )
    state = module.params['state']
    name = module.params['name']
    # The spec declares 'apikey'/'apiid'; the original read the non-existent
    # 'api_key'/'api_id' keys, which raised KeyError on every run.
    apikey = module.params['apikey']
    apiid = module.params['apiid']
    if state == "present":
        (rc, result) = create_meter(module, name, apiid, apikey)
    if state == "absent":
        (rc, result) = delete_meter(module, name, apiid, apikey)
    if rc != 0:
        module.fail_json(msg=result)
    module.exit_json(status=result, changed=True)
if __name__ == '__main__':
    main()
| gpl-3.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/webhook_receiver.py | 2 | 1166 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WebhookReceiver(Model):
    """A webhook receiver.
    :param name: The name of the webhook receiver. Names must be unique across
     all receivers within an action group.
    :type name: str
    :param service_uri: The URI where webhooks should be sent.
    :type service_uri: str
    """
    # msrest consults _validation to enforce required fields when the model
    # is serialized.
    _validation = {
        'name': {'required': True},
        'service_uri': {'required': True},
    }
    # Maps Python attribute names to the wire (JSON) property names/types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'service_uri': {'key': 'serviceUri', 'type': 'str'},
    }
    def __init__(self, name, service_uri):
        # NOTE(review): AutoRest-generated code; it does not call
        # super().__init__() - presumably acceptable for this msrest
        # version, confirm before regenerating or editing by hand.
        self.name = name
        self.service_uri = service_uri
| mit |
alu042/edx-platform | common/test/acceptance/pages/studio/component_editor.py | 73 | 5069 | from bok_choy.page_object import PageObject
from selenium.webdriver.common.keys import Keys
from ..common.utils import click_css
from selenium.webdriver.support.ui import Select
class BaseComponentEditorView(PageObject):
    """
    Shared :class:`.PageObject` behavior for the component and visibility
    editors. Assumes the default xmodule editor markup is displayed.
    """
    BODY_SELECTOR = '.xblock-editor'
    def __init__(self, browser, locator):
        """
        Args:
            browser (selenium.webdriver): Selenium-controlled browser this page lives in.
            locator (str): Identifies which xblock the `.xblock-editor` belongs to.
        """
        super(BaseComponentEditorView, self).__init__(browser)
        self.locator = locator
    def is_browser_on_page(self):
        editor_css = '{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)
        return self.q(css=editor_css).present
    def _bounded_selector(self, selector):
        """
        Scope `selector` to this particular editor's DOM subtree.
        """
        scope = '{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)
        return '{} {}'.format(scope, selector)
    def url(self):
        """
        The editor is a modal, not a page: there is no direct URL.
        """
        return None
    def save(self):
        """
        Click the editor's save button.
        """
        click_css(self, 'a.action-save')
    def cancel(self):
        """
        Click the editor's cancel button (no notification expected).
        """
        click_css(self, 'a.action-cancel', require_notification=False)
class ComponentEditorView(BaseComponentEditorView):
    """
    A :class:`.PageObject` representing the rendered view of a component editor.
    """
    def get_setting_element(self, label):
        """
        Return the setting input element whose label (display name) matches
        `label` within the Settings modal, or None if no label matches.
        """
        # Switch to the Settings tab if the editor exposes one.
        settings_button = self.q(css='.edit-xblock-modal .editor-modes .settings-button')
        if settings_button.is_present():
            settings_button.click()
        setting_labels = self.q(css=self._bounded_selector('.metadata_edit .wrapper-comp-setting .setting-label'))
        for index, setting in enumerate(setting_labels):
            if setting.text == label:
                # NOTE: assumes the .setting-label and .setting-input queries
                # cover the same settings in the same order, so the label's
                # index selects the matching input.
                return self.q(css=self._bounded_selector('.metadata_edit div.wrapper-comp-setting .setting-input'))[index]
        return None
    def set_field_value_and_save(self, label, value):
        """
        Sets the text field with given label (display name) to the specified value, and presses Save.
        """
        elem = self.get_setting_element(label)
        # Clear the current value, set the new one, then
        # Tab to move to the next field (so change event is triggered).
        elem.clear()
        elem.send_keys(value)
        elem.send_keys(Keys.TAB)
        self.save()
    def set_select_value_and_save(self, label, value):
        """
        Sets the select with given label (display name) to the specified value, and presses Save.
        """
        elem = self.get_setting_element(label)
        select = Select(elem)
        select.select_by_value(value)
        self.save()
    def get_selected_option_text(self, label):
        """
        Returns the text of the first selected option for the select with given label (display name),
        or None when no setting with that label exists.
        """
        elem = self.get_setting_element(label)
        if elem:
            select = Select(elem)
            return select.first_selected_option.text
        else:
            return None
class ComponentVisibilityEditorView(BaseComponentEditorView):
    """
    A :class:`.PageObject` representing the rendered view of a component visibility editor.
    """
    # CSS (relative to this editor's root) selecting one visibility option row.
    OPTION_SELECTOR = '.modal-section-content .field'

    @property
    def all_options(self):
        """
        Return all visibility options (raw WebElements).
        """
        return self.q(css=self._bounded_selector(self.OPTION_SELECTOR)).results

    @property
    def selected_options(self):
        """
        Return all selected visibility options.
        """
        results = []
        for option in self.all_options:
            # Each option row contains one checkbox/radio input.
            button = option.find_element_by_css_selector('input.input')
            if button.is_selected():
                results.append(option)
        return results

    def select_option(self, label_text, save=True):
        """
        Click the first option which has a label matching `label_text`.

        Arguments:
            label_text (str): Text of a label accompanying the input
                which should be clicked.
            save (boolean): Whether the "save" button should be clicked
                afterwards.

        Returns:
            bool: Whether the label was found and clicked.
        """
        # Substring match: the first row whose visible text contains
        # label_text wins.
        for option in self.all_options:
            if label_text in option.text:
                option.click()
                if save:
                    self.save()
                return True
        return False
| agpl-3.0 |
Nick-Hall/gramps | gramps/gui/editors/edittaglist.py | 4 | 4647 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Tag editing module for Gramps.
"""
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managedwindow import ManagedWindow
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..listmodel import ListModel, TOGGLE
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Filters' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('Tag_selection_dialog', 'manual')
#-------------------------------------------------------------------------
#
# EditTagList
#
#-------------------------------------------------------------------------
class EditTagList(ManagedWindow):
    """
    Dialog to allow the user to edit a list of tags.

    The dialog is constructed, run modally inside __init__, and its outcome
    is exposed through self.return_list (None when cancelled/closed, else a
    list of (handle, name) tuples for the checked tags).
    """
    def __init__(self, tag_list, full_list, uistate, track):
        """
        Initiate and display the dialog.

        tag_list: tags currently attached (pre-checked in the list).
        full_list: every available tag, as (handle, name) pairs.
        """
        ManagedWindow.__init__(self, uistate, track, self, modal=True)
        # the self.window.run() below makes Gtk make it modal, so any change
        # to the previous line's "modal" would require that line to be changed
        self.namemodel = None
        top = self._create_dialog()
        self.set_window(top, None, _('Tag selection'))
        self.setup_configs('interface.edittaglist', 360, 400)
        # Rows are [handle, checked?, name]; a tag is pre-checked when it is
        # already in tag_list.
        for tag in full_list:
            self.namemodel.add([tag[0], tag in tag_list, tag[1]])
        self.namemodel.connect_model()
        # The dialog is modal. We don't want to have several open dialogs of
        # this type, since then the user will loose track of which is which.
        self.return_list = None
        self.show()
        while True:
            # the self.window.run() makes Gtk make it modal, so any change to
            # that line means the ManagedWindow.__init__ must be changed also
            response = self.window.run()
            if response == Gtk.ResponseType.HELP:
                # Help keeps the dialog open; loop again for another response.
                display_help(webpage=WIKI_HELP_PAGE,
                             section=WIKI_HELP_SEC)
            elif response == Gtk.ResponseType.DELETE_EVENT:
                break
            else:
                if response == Gtk.ResponseType.OK:
                    # Collect (handle, name) for every checked row.
                    self.return_list = [(row[0], row[2])
                                        for row in self.namemodel.model
                                        if row[1]]
                self.close()
                break

    def _create_dialog(self):
        """
        Create a dialog box to select tags.
        """
        # pylint: disable-msg=E1101
        top = Gtk.Dialog(transient_for=self.uistate.window)
        top.vbox.set_spacing(5)
        # Column spec: hidden handle, toggle checkbox, tag name.
        columns = [('', -1, 300),
                   (' ', -1, 25, TOGGLE, True, None),
                   (_('Tag'), -1, 300)]
        view = Gtk.TreeView()
        self.namemodel = ListModel(view, columns)
        slist = Gtk.ScrolledWindow()
        slist.add(view)
        slist.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        top.vbox.pack_start(slist, 1, 1, 5)
        top.add_button(_('_Help'), Gtk.ResponseType.HELP)
        top.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
        top.add_button(_('_OK'), Gtk.ResponseType.OK)
        return top

    def build_menu_names(self, obj):  # meaningless while it's modal
        """
        Define the menu entry for the ManagedWindows.
        """
        return (_("Tag selection"), None)
| gpl-2.0 |
rlowrance/re-local-linear | ege_date.py | 1 | 33937 | '''create files contains estimated generalization errors for model
sys.argv: year month day
The sale_date of the models. Uses data up to the day before the sale date
Files created:
year-month-day-MODEL-SCOPE-T[-MODELPARAMS].foldResult
where:
MODEL is one of the models {ols, lasso, ridge, rf, xt}
SCOPE is one of {global,zDDDDD} (global or zip-code specific)
T is the number of days in the training period
MODELPARAMS depends on the model and may be empty
'''
import collections
import cPickle as pickle
import datetime
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from directory import directory
from Logger import Logger
def usage():
    'print the command-line usage message and exit with status 1'
    # Parenthesized single-argument print is valid (and identical) in both
    # Python 2 and Python 3, unlike the original bare print statement.
    print('usage: python ege-date.py yyyy-mm-dd')
    sys.exit(1)
def make_control(argv):
    'Return control Bunch'
    # argv: [script_name, 'yyyy-mm-dd']; the date names the sale date modeled.
    script_name = argv[0]
    base_name = script_name.split('.')[0]
    random_seed = 123
    now = datetime.datetime.now()
    log_file_name = base_name + '.' + now.isoformat('T') + '.log'

    if len(argv) < 2:
        print 'missing date argument'
        usage()
    if len(argv) > 2:
        print 'extra args'
        usage()

    year, month, day = argv[1].split('-')
    sale_date = datetime.date(int(year), int(month), int(day))

    # prior work found that the assessment was not useful
    # just the census and tax roll features
    # predictors with transformation to log domain
    # NOTE: x() iterates this plain dict, so column order in the feature
    # matrix follows Python 2's arbitrary-but-stable dict iteration order.
    predictors = {  # the columns in the x_arrays are in this order
        'fraction.owner.occupied': None,
        'FIREPLACE.NUMBER': 'log1p',
        'BEDROOMS': 'log1p',
        'BASEMENT.SQUARE.FEET': 'log1p',
        'LAND.SQUARE.FOOTAGE': 'log',
        'zip5.has.industry': None,
        'census.tract.has.industry': None,
        'census.tract.has.park': None,
        'STORIES.NUMBER': 'log1p',
        'census.tract.has.school': None,
        'TOTAL.BATHS.CALCULATED': 'log1p',
        'median.household.income': 'log',  # not log feature in earlier version
        'LIVING.SQUARE.FEET': 'log',
        'has.pool': None,
        'zip5.has.retail': None,
        'census.tract.has.retail': None,
        'is.new.construction': None,
        'avg.commute': None,
        'zip5.has.park': None,
        'PARKING.SPACES': 'log1p',
        'zip5.has.school': None,
        'TOTAL.ROOMS': 'log1p',
        'age': None,
        'age2': None,
        'effective.age': None,
        'effective.age2': None}

    debug = False
    b = Bunch(
        path_in=directory('working') + 'transactions-subset2.pickle',
        path_log=directory('log') + log_file_name,
        path_out=directory('working') + base_name + '-%s' % sale_date + '.pickle',
        arg_date=sale_date,
        random_seed=random_seed,
        sale_date=sale_date,
        # debug mode fits only random forests; normal mode also fits OLS
        models={'rf': Rf(), 'ols': Ols()} if not debug else {'rf': Rf()},
        scopes=['global', 'zip'],
        # training periods in days: two quick values when debugging,
        # otherwise weekly steps from one week to one year
        training_days=(7, 366) if debug else range(7, 366, 7),
        n_folds=10,
        predictors=predictors,
        price_column='SALE.AMOUNT',
        debug=debug)
    return b
def x(mode, df, control):
    '''return 2D np.array, with df x values possibly transformed to log

    mode: None or 'linear' leave values untouched; 'log' applies each
    predictor's declared transformation ('log' or 'log1p') from
    control.predictors.

    RETURNS array: np.array 2D, one row per df row, one column per predictor
    '''
    def transform(v, mode, transformation):
        if mode is None:
            return v
        if mode == 'linear':
            return v
        if mode == 'log':
            # a predictor with no declared transformation stays linear even
            # in log mode (e.g. indicator/0-1 features)
            if transformation is None:
                return v
            if transformation == 'log':
                return np.log(v)
            if transformation == 'log1p':
                return np.log1p(v)
            raise RuntimeError('bad transformation: ' + str(transformation))
        raise RuntimeError('bad mode:' + str(mode))

    array = np.empty(shape=(df.shape[0], len(control.predictors)),
                     dtype=np.float64).T
    # build up in transposed form: each predictor fills one row of the
    # transposed array, then the final .T restores samples-by-features
    index = 0
    for predictor_name, transformation in control.predictors.iteritems():
        v = transform(df[predictor_name].values, mode, transformation)
        array[index] = v
        index += 1
    return array.T
def y(mode, df, control):
    '''return 1D np.array of float64 prices from df's price column

    mode 'log' returns prices in the log domain; any other mode (None,
    'linear') returns them untouched.
    '''
    # BUG FIX: .as_matrix() was deprecated and then removed from pandas;
    # .values is the equivalent accessor.  Also avoids the original's deep
    # copy of the entire dataframe just to transform one column.
    prices = np.array(df[control.price_column].values, dtype=np.float64)
    if mode == 'log':
        prices = np.log(prices)
    return prices
def demode(v, mode):
    'convert a possibly log-domain value back to the linear (normal) domain'
    if v is None:
        return None
    if mode == 'log':
        return np.exp(v)
    return v
def errors(model_result):
    '''return (median_absolute_error, median_relative_absolute_error)

    model_result: dict with 1D numeric arrays under 'actuals' and 'estimates'.
    Relative errors assume no actual value is zero.
    '''
    # BUG FIX: removed a leftover debugging pprint(model_result) that dumped
    # every result dict (including full arrays) to stdout on each call.
    actuals = model_result['actuals']
    estimates = model_result['estimates']
    abs_error = np.abs(actuals - estimates)
    median_abs_error = np.median(abs_error)
    rel_abs_error = abs_error / actuals
    median_rel_abs_error = np.median(rel_abs_error)
    return median_abs_error, median_rel_abs_error
class ReportOls(object):
    'report generation with y_mode and x_mode in key'
    # NOTE: perhaps reusable for any model with y and x modes

    def __init__(self):
        # printf templates: *_fold lines end in the fold number; summary
        # lines print the median across folds. The zip variants print the
        # zip code (%6d) where the global variants print the scope (%6s).
        self.format_global_fold = '%10s %2d %3s %6s %3s %3s f%d %6.0f %3.2f'
        self.format_zip_fold = '%10s %2d %3s %6d %3s %3s f%d %6.0f %3.2f'
        self.format_global = '%10s %2d %3s %6s %3s %3s median %6.0f %3.2f'
        self.format_zip = '%10s %2d %3s %6d %3s %3s median %6.0f %3.2f'

    def global_fold_line(self, key, result):
        'return one formatted line for a global-scope fold result'
        fold_number, sale_date, training_days, model_name, scope = key
        assert(scope == 'global')
        for result_key, result_value in result.iteritems():
            y_mode = result_key[1][:3]
            x_mode = result_key[3][:3]
            median_abs_error, median_rel_abs_error = errors(result_value)
            line = self.format_global_fold % (sale_date,
                                              training_days,
                                              model_name,
                                              scope,
                                              y_mode,
                                              x_mode,
                                              fold_number,
                                              median_abs_error,
                                              median_rel_abs_error)
            # NOTE(review): returns inside the loop, so only the first
            # (y_mode, x_mode) variant is reported -- confirm intent.
            return line

    def zip_fold_line(self, key, result):
        'return one formatted line for a zip-code-scope fold result'
        fold_number, sale_date, training_days, model_name, scope = key
        assert(isinstance(scope, tuple))
        assert(scope[0] == 'zip')
        zip_code = scope[1]
        for result_key, result_value in result.iteritems():
            y_mode = result_key[1][:3]
            x_mode = result_key[3][:3]
            median_abs_error, median_rel_abs_error = errors(result_value)
            line = self.format_zip_fold % (sale_date,
                                           training_days,
                                           model_name,
                                           zip_code,
                                           y_mode,
                                           x_mode,
                                           fold_number,
                                           median_abs_error,
                                           median_rel_abs_error)
            # NOTE(review): same first-variant-only return as
            # global_fold_line above.
            return line

    def summarize_global(self,
                         sale_date,
                         training_days,
                         model_name,
                         all_results,
                         control):
        'print per-fold and median-across-folds lines for the global scope'
        scope = 'global'
        for y_mode in ('log', 'linear'):
            y_mode_print = y_mode[:3]
            for x_mode in ('log', 'linear'):
                x_mode_print = x_mode[:3]
                median_errors = np.zeros(control.n_folds, dtype=np.float64)
                median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
                for fold_number in xrange(control.n_folds):
                    # determine errors in the fold
                    key = (fold_number, sale_date, training_days, model_name, scope)
                    if key not in all_results:
                        print 'key', key
                        print 'not in result'
                        # missing fold leaves a zero in the medians below
                        continue
                    result = all_results[key]
                    model_result = result[('y_mode', y_mode, 'x_mode', x_mode)]
                    median_abs_error, median_rel_abs_error = errors(model_result)
                    fold_line = self.format_global_fold % (sale_date,
                                                           training_days,
                                                           model_name,
                                                           scope,
                                                           y_mode_print,
                                                           x_mode_print,
                                                           fold_number,
                                                           median_abs_error,
                                                           median_rel_abs_error)
                    print fold_line
                    median_errors[fold_number] = median_abs_error
                    median_rel_errors[fold_number] = median_rel_abs_error
                all_folds_line = self.format_global % (sale_date,
                                                       training_days,
                                                       model_name,
                                                       scope,
                                                       y_mode_print,
                                                       x_mode_print,
                                                       np.median(median_errors),
                                                       np.median(median_rel_errors))
                print all_folds_line

    def summarize_zip(self, sale_date, training_days, model_name,
                      all_results, control):
        'print per-fold and median lines for every zip code present'

        def list_median(lst):
            assert(len(lst) > 0)
            return np.median(np.array(lst, dtype=np.float64))

        def report_zip_code(zip_code, keys):
            # keys: all fold-level result keys for this zip code
            for y_mode in ('log', 'linear'):
                y_mode_print = y_mode[:3]
                for x_mode in ('log', 'linear'):
                    x_mode_print = x_mode[:3]
                    mode_key = ('y_mode', y_mode, 'x_mode', x_mode)
                    median_abs_errors = []
                    median_rel_abs_errors = []
                    for key in keys:
                        model_result = all_results[key][mode_key]
                        median_abs_error, median_rel_abs_error = errors(model_result)
                        fold_line = self.format_zip_fold % (sale_date,
                                                            training_days,
                                                            model_name,
                                                            zip_code,
                                                            y_mode_print,
                                                            x_mode_print,
                                                            key[0],  # fold number
                                                            median_abs_error,
                                                            median_rel_abs_error)
                        print fold_line
                        median_abs_errors.append(median_abs_error)
                        median_rel_abs_errors.append(median_rel_abs_error)
                    all_folds_line = self.format_zip % (sale_date,
                                                        training_days,
                                                        model_name,
                                                        zip_code,
                                                        y_mode_print,
                                                        x_mode_print,
                                                        list_median(median_abs_errors),
                                                        list_median(median_rel_abs_errors))
                    print all_folds_line

        # determine all zip codes in the specified lines
        zip_codes = collections.defaultdict(set)
        for key in all_results.keys():
            key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
            if key_scope == 'global':
                # examine only zip code scopes
                continue
            if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
                key_zip_code = key_scope[1]
                zip_codes[key_zip_code].add(key)

        # process each zip code
        for zip_code, keys in zip_codes.iteritems():
            report_zip_code(zip_code, keys)

    def summarize(self, sale_date, training_days, model_name,
                  all_results, control):
        'print the full report: global scope first, then per-zip-code'
        self.summarize_global(sale_date, training_days, model_name,
                              all_results, control)
        self.summarize_zip(sale_date, training_days, model_name,
                           all_results, control)
class Ols(object):
    'Ordinary least squares via sklearn'

    def __init__(self):
        # the constructor (not an instance) is stored so run() builds a
        # fresh model for every variant
        self.Model_Constructor = linear_model.LinearRegression

    def reporter(self):
        'return the report class that knows how to print OLS results'
        return ReportOls

    def run(self, train, test, control):
        '''fit on training data and test

        ARGS
        train  : dataframe
        test   : dataframe
        control: Bunch

        RETURN dict of values
        dict key = ('y_mode', y_mode, 'x_mode', x_mode)
        values = dict with keys 'coef', 'intercept_', 'estimates', 'actuals'
        '''
        # implement variants
        verbose = False

        def variant(x_mode, y_mode):
            train_x = x(x_mode, train, control)
            test_x = x(x_mode, test, control)
            train_y = y(y_mode, train, control)
            model = self.Model_Constructor(fit_intercept=True,
                                           normalize=True,
                                           copy_X=True)
            fitted_model = model.fit(train_x, train_y)
            # if the model cannot be fitted, LinearRegressor returns
            # the mean of the train_y values
            estimates = fitted_model.predict(test_x)
            value = {
                'coef': fitted_model.coef_,
                'intercept_': fitted_model.intercept_,
                # estimates are converted back to the linear domain so they
                # are comparable with the (always linear) actuals
                'estimates': demode(estimates, y_mode),
                'actuals': y('linear', test, control)
            }
            # check results
            if verbose:
                print 'x_mode, y_mode: ', x_mode, y_mode
                print 'actuals: ', value['actuals']
                print 'estimates: ', value['estimates']
            return value

        all_variants = {}
        for x_mode in ('log', 'linear'):
            for y_mode in ('log', 'linear'):
                variant_value = variant(x_mode, y_mode)
                key = ('y_mode', y_mode, 'x_mode', x_mode)
                all_variants[key] = variant_value
        return all_variants
class ReportRf(object):
    'report generation w no variants (for now'

    def __init__(self):
        'sale_date days model global fold error abs_error'
        # printf templates, as in ReportOls but without the y/x mode columns
        self.format_global_fold = '%10s %2d %3s %6s f%d %6.0f %3.2f'
        self.format_zip_fold = '%10s %2d %3s %6d f%d %6.0f %3.2f'
        self.format_global = '%10s %2d %3s %6s median %6.0f %3.2f'
        self.format_zip = '%10s %2d %3s %6d median %6.0f %3.2f'

    def global_fold_line(self, key, result):
        'return one formatted line for a global-scope fold result'
        fold_number, sale_date, training_days, model_name, scope = key
        assert(scope == 'global')
        median_abs_error, median_rel_abs_error = errors(result)
        line = self.format_global_fold % (sale_date,
                                          training_days,
                                          model_name,
                                          scope,
                                          fold_number,
                                          median_abs_error,
                                          median_rel_abs_error)
        return line

    def zip_fold_line(self, key, result):
        'return one formatted line for a zip-code-scope fold result'
        fold_number, sale_date, training_days, model_name, scope = key
        assert(isinstance(scope, tuple))
        assert(scope[0] == 'zip')
        zip_code = scope[1]
        median_abs_error, median_rel_abs_error = errors(result)
        # NOTE(review): formats with self.format_global_fold rather than
        # self.format_zip_fold; the rendered output is identical for integer
        # zip codes (%6s vs %6d), but format_zip_fold looks intended here.
        line = self.format_global_fold % (sale_date,
                                          training_days,
                                          model_name,
                                          zip_code,
                                          fold_number,
                                          median_abs_error,
                                          median_rel_abs_error)
        return line

    def summarize_global(self, sale_date, training_days, model_name, all_results, control):
        'print per-fold and median-across-folds lines for the global scope'
        scope = 'global'
        median_errors = np.zeros(control.n_folds, dtype=np.float64)
        median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
        for fold_number in xrange(control.n_folds):
            key = (fold_number, sale_date, training_days, model_name, scope)
            if key not in all_results:
                # can happen when a model could not be fit
                print 'model_result missing key', key
                continue
            model_result = all_results[key]
            if len(model_result['actuals']) == 0:
                continue
            median_abs_error, median_rel_abs_error = errors(model_result)
            fold_line = self.format_global_fold % (sale_date,
                                                   training_days,
                                                   model_name,
                                                   scope,
                                                   fold_number,
                                                   median_abs_error,
                                                   median_rel_abs_error)
            print fold_line
            median_errors[fold_number] = median_abs_error
            median_rel_errors[fold_number] = median_rel_abs_error
        all_folds_line = self.format_global % (sale_date,
                                               training_days,
                                               model_name,
                                               scope,
                                               np.median(median_errors),
                                               np.median(median_rel_errors))
        print all_folds_line

    def summarize_zip(self, sale_date, training_days, model_name, all_results, control):
        'print per-fold and median lines for every zip code present'

        def list_median(lst):
            assert(len(lst) > 0)
            return np.median(np.array(lst, dtype=np.float64))

        def report_zip_code(zip_code, keys):
            median_abs_errors = []
            median_rel_abs_errors = []
            for key in keys:
                model_result = all_results[key]
                median_abs_error, median_rel_abs_error = errors(model_result)
                fold_line = self.format_zip_fold % (sale_date,
                                                    training_days,
                                                    model_name,
                                                    zip_code,
                                                    key[0],  # fold number
                                                    median_abs_error,
                                                    median_rel_abs_error)
                print fold_line
                median_abs_errors.append(median_abs_error)
                median_rel_abs_errors.append(median_rel_abs_error)
            all_folds_line = self.format_zip % (sale_date,
                                                training_days,
                                                model_name,
                                                zip_code,
                                                list_median(median_abs_errors),
                                                list_median(median_rel_abs_errors))
            print all_folds_line

        # determine all zip codes in the specified lines
        zip_codes = collections.defaultdict(set)
        for key in all_results.keys():
            key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
            if key_scope == 'global':
                # examine only zip code scopes
                continue
            if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
                key_zip_code = key_scope[1]
                zip_codes[key_zip_code].add(key)

        # process each zip code
        for zip_code, keys in zip_codes.iteritems():
            report_zip_code(zip_code, keys)

    def summarize(self, sale_date, training_days, model_name, all_results, control):
        'print the full report: global scope first, then per-zip-code'
        self.summarize_global(sale_date, training_days, model_name, all_results, control)
        self.summarize_zip(sale_date, training_days, model_name, all_results, control)
class Rf(object):
'Random forests via sklearn'
def __init__(self):
self.Model_Constructor = ensemble.RandomForestRegressor
def reporter(self):
return ReportRf
def run(self, train, test, control):
'''fit on train, test on test, return dict of variants
The variants are defined by the number of trees in the forest
RETURN dict with key = variant_description
'''
verbose = False
def variant(n_trees):
train_x = x(None, train, control) # no transformation
test_x = x(None, test, control)
train_y = y(None, train, control)
model = self.Model_Constructor(n_estimators=n_trees,
random_state=control.random_seed)
fitted_model = model.fit(train_x, train_y)
estimates = fitted_model.predict(test_x)
# return selected fitted results
result = {
'feature_importances': fitted_model.feature_importances_,
'estimates': estimates,
'actuals': y('None', test, control)}
if verbose:
for k, v in result.iteritems():
print k, v
return result
all_variants = {}
for n_trees in (10, 100, 300, 1000):
variant_value = variant(n_trees)
key = ('n_trees', n_trees)
all_variants[key] = variant_value
return all_variants
def within(sale_date, training_days, df):
    '''return boolean mask selecting samples in the training window

    The window spans training_days days and ends on sale_date (inclusive);
    one training day means "samples on the sale date only".
    '''
    assert(training_days > 0)
    window_start = sale_date - datetime.timedelta(training_days - 1)
    dates = df['sale.python_date']
    return np.logical_and(dates >= window_start, dates <= sale_date)
def on_sale_date(sale_date, df):
    '''return boolean mask selecting the samples sold exactly on sale_date'''
    return df['sale.python_date'] == sale_date
def add_age(df, sale_date):
    '''Return new df with extra columns for age and effective age

    Adds age, age2, effective.age, effective.age2, measured in years from
    YEAR.BUILT / EFFECTIVE.YEAR.BUILT to the row's 'sale.year' column.
    NOTE(review): the sale_date parameter is unused; ages come from the
    'sale.year' column -- confirm intent.
    '''
    column_names = df.columns.tolist()
    if 'age' in column_names:
        # BUG FIX: the debugging prints below were Python 2-only print
        # statements; parenthesized single-argument prints behave
        # identically in Python 2 and 3.
        print(column_names)
        print('age in column_names')
        pdb.set_trace()
    assert('age' not in column_names)
    assert('age2' not in column_names)
    assert('effective.age' not in column_names)
    assert('effective.age2' not in column_names)
    sale_year = df['sale.year']

    def age(column_name):
        'age in years from the year in column_name to the sale year'
        age_in_years = sale_year - df[column_name].values
        return pd.Series(age_in_years, index=df.index)

    result = df.copy(deep=True)
    result['age'] = age('YEAR.BUILT')
    result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
    result['age2'] = result['age'] * result['age']
    result['effective.age2'] = result['effective.age'] * result['effective.age']
    return result
def unique_zip_codes(df):
    'yield each unique zip code in the dataframe, in order of first appearance'
    # BUG FIX: the original indexed the unique-values array positionally via
    # xrange, which does not exist in Python 3; iterating the array directly
    # yields exactly the same values in the same order.
    for zip_code in df['zip5'].unique():
        yield zip_code
def zip_codes(df, a_zip_code):
    'return new dataframe containing just the rows for the specified zip code'
    # PERF: select first, then copy -- the original deep-copied the entire
    # frame before filtering; copying only the matching rows yields the same
    # independent result frame with far less work.
    return df[df['zip5'] == a_zip_code].copy(deep=True)
def make_train_model(df, sale_date, training_days):
    'return df of transactions no more than training_days before the sale_date'
    window_mask = within(sale_date, training_days, df)
    return add_age(df[window_mask], sale_date)
def make_test_model(df, sale_date):
    'return df of transactions occurring exactly on the sale_date'
    sale_day_mask = on_sale_date(sale_date, df)
    return add_age(df[sale_day_mask], sale_date)
def determine_most_popular_zip_code(df, control):
    'return the zip_code that occurs most often in the dataframe'
    # count occurrences of every zip code
    zip_code_counter = collections.Counter()
    for _, zip_code in df.zip5.iteritems():
        zip_code_counter[zip_code] += 1
    most_common_zip_code, count = zip_code_counter.most_common(1)[0]
    print 'most common zip_code', most_common_zip_code, 'occurs', count

    # assert: the most common zip code is in each fold
    fold_number = -1
    folds_for_zip_code = collections.defaultdict(set)
    kf = cross_validation.KFold(n=(len(df)),
                                n_folds=control.n_folds,
                                shuffle=True,
                                random_state=control.random_seed)
    for train_indices, test_indices in kf:
        fold_number += 1
        # don't create views (just to be careful)
        train = df.iloc[train_indices].copy(deep=True)
        test = df.iloc[test_indices].copy(deep=True)
        if most_common_zip_code not in test.zip5.values:
            print most_common_zip_code, 'not in', fold_number
        for zip_code in unique_zip_codes(test):
            assert(zip_code in test.zip5.values)
            if zip_code not in train.zip5.values:
                print 'fold %d zip_code %d in test and not train' % (
                    fold_number,
                    zip_code)
            folds_for_zip_code[zip_code].add(fold_number)
    # NOTE(review): the literal 10 below assumes control.n_folds == 10;
    # confirm, or derive from control.n_folds.
    assert(len(folds_for_zip_code[most_common_zip_code]) == 10)

    # print zip_code not in each test set
    count_in_10 = 0
    count_not_in_10 = 0
    for zip_code, set_folds in folds_for_zip_code.iteritems():
        if len(set_folds) != 10:
            print 'zip_code %d in only %d folds' % (zip_code, len(set_folds))
            count_not_in_10 += 1
        else:
            count_in_10 += 1
    print 'all other zip codes are in 10 folds'
    print 'in 10: %d not in 10: %d' % (count_in_10, count_not_in_10)
    print 'NOTE: all this analysis is before training samples are selected'
    return most_common_zip_code
def read_training_data(control):
    # NOTE(review): stub -- the body is only this docstring, so the function
    # returns None; main() loads the dataframe itself via pickle. Presumably
    # a leftover from refactoring; confirm before relying on it.
    'return dataframe'
def squeeze(result, verbose=False):
    'replace float64 with float32'
    # Halves the memory of the pickled results. String keys mark rf-style
    # results (arrays directly under the key); tuple keys mark ols-style
    # results (a nested dict of arrays/scalars). Anything else is a bug.

    def is_np_array_float64(x):
        return isinstance(x, np.ndarray) and x.dtype == np.float64

    def is_np_scalar_float64(x):
        return isinstance(x, np.float64)

    if verbose:
        pprint(result)
    assert(isinstance(result, dict))
    new_result = {}
    for k, v in result.iteritems():
        if isinstance(k, str):
            # rf result
            if is_np_array_float64(v):
                # e.g., actual, estimate, other info in a vector
                new_result[k] = np.array(v, dtype=np.float32)
            else:
                print k, v
                raise RuntimeError('unexpected')
        elif isinstance(k, tuple):
            # ols result
            new_ols_result = {}
            for ols_key, ols_value in v.iteritems():
                if is_np_array_float64(ols_value):
                    new_ols_result[ols_key] = np.array(ols_value, dtype=np.float32)
                elif is_np_scalar_float64(ols_value):
                    new_ols_result[ols_key] = np.float32(ols_value)
                else:
                    print ols_key, ols_value
                    raise RuntimeError('unexpected')
            new_result[k] = new_ols_result
        else:
            # unexpected
            print k, v
            raise RuntimeError('unexpected')
    if verbose:
        pprint(new_result)
    return new_result
def fit_and_test_models(df, control):
    '''Return all_results dict

    all_results has results for each fold, sale date, training period, model,
    scope; keys are (fold_number, sale_date, training_days, model_name, scope)
    and values are squeezed (float32) model result dicts.
    '''
    verbose = False
    all_results = {}
    fold_number = -1

    # NOTE: this local mask shadows the module-level on_sale_date function
    # within this body (it is not needed here, but worth knowing).
    on_sale_date = df['sale.python_date'] == control.sale_date
    num_test_samples = sum(on_sale_date)
    print 'sale_date %s has %d samples' % (control.sale_date, num_test_samples)
    assert num_test_samples >= control.n_folds, 'unable to form folds'

    # stratify on the sale-date mask so every fold gets some test-day samples
    skf = cross_validation.StratifiedKFold(on_sale_date, control.n_folds)
    for train_indices, test_indices in skf:
        fold_number += 1
        # don't create views (just to be careful)
        train = df.iloc[train_indices].copy(deep=True)
        test = df.iloc[test_indices].copy(deep=True)
        for training_days in control.training_days:
            train_model = make_train_model(train, control.sale_date, training_days)
            if len(train_model) == 0:
                print 'no training data fold %d sale_date %s training_days %d' % (
                    fold_number, control.sale_date, training_days)
                sys.exit(1)
            test_model = make_test_model(test, control.sale_date)
            if len(test_model) == 0:
                print 'no testing data fold %d sale_date %s training_days %d' % (
                    fold_number, control.sale_date, training_days)
                continue
            for model_name, model in control.models.iteritems():
                print fold_number, control.sale_date, training_days, model_name

                def make_key(scope):
                    return (fold_number, control.sale_date, training_days, model_name, scope)

                # determine global results (for all areas)
                if len(test_model) == 0 or len(train_model) == 0:
                    print 'skipping global zero length', len(test_model), len(train_model)
                else:
                    global_result = model.run(train=train_model,
                                              test=test_model,
                                              control=control)
                    key = make_key(scope='global')
                    all_results[key] = squeeze(global_result)
                    report = model.reporter()()  # instantiate report class
                    if verbose:
                        print report.global_fold_line(key, global_result)

                # determine results for each zip code in test data
                for zip_code in unique_zip_codes(test_model):
                    train_model_zip = zip_codes(train_model, zip_code)
                    test_model_zip = zip_codes(test_model, zip_code)
                    if len(train_model_zip) == 0 or len(test_model_zip) == 0:
                        print 'skipping zip zero length', zip_code, len(test_model_zip), len(train_model_zip)
                    else:
                        zip_code_result = model.run(train=train_model_zip,
                                                    test=test_model_zip,
                                                    control=control)
                        key = make_key(scope=('zip', zip_code))
                        all_results[key] = squeeze(zip_code_result)
                        # NOTE(review): `report` is only bound in the global
                        # branch above; if that branch were skipped and
                        # verbose were True, this would raise NameError.
                        if verbose:
                            print report.zip_fold_line(key, zip_code_result)
    print 'num test samples across all folds:', num_test_samples
    return all_results
def print_results(all_results, control):
    'print a summary report for every (training_days, model) combination'
    for training_days in control.training_days:
        for model_name, model in control.models.iteritems():
            report = model.reporter()()  # how to print is in the model result
            report.summarize(control.sale_date,
                             training_days,
                             model_name,
                             all_results,
                             control)
def main(argv):
warnings.filterwarnings('error') # convert warnings to errors
control = make_control(argv)
sys.stdout = Logger(logfile_path=control.path_log) # print also write to log file
print control
# read input
f = open(control.path_in, 'rb')
df_loaded = pickle.load(f)
f.close()
df_loaded_copy = df_loaded.copy(deep=True) # used for debugging
if False:
most_popular_zip_code = determine_most_popular_zip_code(df_loaded.copy(), control)
print most_popular_zip_code
all_results = fit_and_test_models(df_loaded, control)
assert(df_loaded.equals(df_loaded_copy))
if False:
print_results(all_results, control)
# write result
print 'writing results to', control.path_out
result = {'control': control, # control.predictors orders the x values
'all_results': all_results}
f = open(control.path_out, 'wb')
pickle.dump(result, f)
print 'ok'
if __name__ == "__main__":
    if False:
        # quiet pyflakes warnings about otherwise-unused imports;
        # this branch never executes
        pdb.set_trace()
        pprint(None)
        np.all()
        pd.Series()
    main(sys.argv)
| mit |
ericholscher/django | django/core/checks/compatibility/django_1_6_0.py | 30 | 2329 | from __future__ import unicode_literals
from django.db import models
def check_test_runner():
    """
    Checks if the user has *not* overridden the ``TEST_RUNNER`` setting &
    warns them about the default behavior changes.

    If the user has overridden that setting, we presume they know what they're
    doing & avoid generating a message.
    """
    from django.conf import settings

    new_default = 'django.test.runner.DiscoverRunner'
    configured_runner = getattr(settings, 'TEST_RUNNER', new_default)
    if configured_runner != new_default:
        # An explicit override means the user has already made a choice.
        return None
    parts = [
        "Django 1.6 introduced a new default test runner ('%s')" % new_default,
        "You should ensure your tests are all running & behaving as expected. See",
        "https://docs.djangoproject.com/en/dev/releases/1.6/#discovery-of-tests-in-any-test-module",
        "for more information.",
    ]
    return ' '.join(parts)
def check_boolean_field_default_value():
    """
    Checks if there are any BooleanFields without a default value, &
    warns the user that the default has changed from False to Null.

    Returns the warning message string, or None when every BooleanField
    has an explicit default.
    """
    fields = []
    for cls in models.get_models():
        opts = cls._meta
        for f in opts.local_fields:
            if isinstance(f, models.BooleanField) and not f.has_default():
                fields.append(
                    '%s.%s: "%s"' % (opts.app_label, opts.object_name, f.name)
                )

    if fields:
        fieldnames = ", ".join(fields)
        message = [
            "You have not set a default value for one or more BooleanFields:",
            "%s." % fieldnames,
            "In Django 1.6 the default value of BooleanField was changed from",
            "False to Null when Field.default isn't defined. See",
            # BUG FIX: a missing comma after the URL caused Python's implicit
            # string-literal concatenation to fuse it with the next element,
            # producing ".../#booleanfieldfor more information."
            "https://docs.djangoproject.com/en/1.6/ref/models/fields/#booleanfield",
            "for more information."
        ]
        return ' '.join(message)
def run_checks():
    """
    Required by the ``check`` management command, this returns a list of
    messages from all the relevant check functions for this version of Django.
    """
    raw_outputs = (
        check_test_runner(),
        check_boolean_field_default_value(),
    )
    # Filter out the ``None`` or empty strings.
    return [message for message in raw_outputs if message]
| bsd-3-clause |
sklnet/opendroid-enigma2 | lib/python/Components/Renderer/valioSystem.py | 13 | 1658 | # -*- coding: utf-8 -*-
#
# System Data Renderer for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2010-2011
#
#######################################################################
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Tools.HardwareInfo import HardwareInfo
from enigma import eLabel
from Renderer import Renderer
class valioSystem(Renderer, VariableText):
    """Skin renderer that shows CPU load and, on supported boxes,
    peak temperature and fan speed."""

    def __init__(self):
        Renderer.__init__(self)
        VariableText.__init__(self)
        # Device names containing 8000/500/800se get the compact
        # load+temperature+fan display; other boxes show load averages only.
        if "8000" in HardwareInfo().get_device_name() or "500" in HardwareInfo().get_device_name() or "800se" in HardwareInfo().get_device_name():
            self.ZeigeTemp = True  # German: "show temperature"
        else:
            self.ZeigeTemp = False

    GUI_WIDGET = eLabel  # widget class the skin engine instantiates

    def changed(self, what):
        # Recompute the displayed text; skipped while the widget is hidden.
        if not self.suspended:
            # highest reading across all temperature sensors; 0 if none
            maxtemp = 0
            try:
                templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
                tempcount = len(templist)
                for count in range(tempcount):
                    id = templist[count]
                    tt = sensors.getSensorValue(id)
                    if tt > maxtemp:
                        maxtemp = tt
            except:
                # best effort: boxes without temperature sensors keep 0
                pass
            loada = "0"
            try:
                out_line = open("/proc/loadavg").readline()
                if self.ZeigeTemp:
                    # first load average only, e.g. "0.15"
                    loada = out_line[:4]
                else:
                    # first two load averages, stacked on separate lines
                    loada = out_line[:9]
                # NOTE(review): indentation reconstructed -- the replace is
                # harmless for the 4-char slice, which contains no space
                loada = loada.replace(" ","\n")
            except:
                pass
            fan = 0
            try:
                fanid = sensors.getSensorsList(sensors.TYPE_FAN_RPM)[0]
                fan = sensors.getSensorValue(fanid)
            except:
                pass
            if self.ZeigeTemp:
                # fan sensor reports half-revolutions, hence the /2
                self.text = "cpu "+loada+"\ntmp "+str(maxtemp)+"°C\nfan "+str(int(fan/2))
            else:
                self.text = "cpu\n"+loada

    def onShow(self):
        # resume updates and refresh immediately
        self.suspended = False
        self.changed(None)

    def onHide(self):
        # stop updating while not visible
        self.suspended = True
| gpl-2.0 |
LuisAlejandro/tribus | tribus/common/charms/repository.py | 2 | 4659 | import json
import logging
import os
import tempfile
import urllib
import urlparse
import yaml
from tribus.common.charms.provider import get_charm_from_path
from tribus.common.charms.url import CharmURL, CharmCollection
from tribus.common.errors import FileNotFound
from tribus.common.utils import list_dirs
from tribus.common import under
from tribus.common.charms.errors import (
CharmNotFound, CharmError, RepositoryNotFound, ServiceConfigValueError)
# Module-level logger for charm repository operations.
log = logging.getLogger("tribus.common.charms")

# Base URL of the official Juju charm store.
CS_STORE_URL = "https://store.juju.ubuntu.com"
def _makedirs(path):
    """Best-effort recursive directory creation.

    Identical to ``os.makedirs`` except that any ``OSError`` (most commonly
    "directory already exists") is silently ignored.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Path already present or not creatable: callers treat this as
        # best-effort, so swallow the error.
        pass
def _cache_key(charm_url):
    """Return the cache file name for *charm_url*.

    The URL must carry an explicit revision (enforced by
    ``assert_revision``) so that the key is unambiguous.
    """
    charm_url.assert_revision()
    return under.quote("%s.charm" % charm_url)
class LocalCharmRepository(object):
    """Charm repository in a local directory.

    The directory holds one subdirectory per series, each containing
    individual charm directories.
    """

    type = "local"

    def __init__(self, path):
        """Validate and store the repository root.

        :raises RepositoryNotFound: if *path* is None, missing, or not a
            directory.
        """
        if path is None or not os.path.isdir(path):
            raise RepositoryNotFound(path)
        self.path = path

    def list(self):
        """Return all charms found in this repository."""
        schema = "local"
        col = CharmCollection(schema)
        charms = []
        for name in list_dirs(self.path):
            charmurl = CharmURL(col, name, None)
            try:
                charms.append(self.find(charmurl))
            except CharmNotFound:
                # Directory did not yield a usable charm; skip it.
                continue
        return charms

    def _collection(self, collection):
        """Yield every loadable charm under *collection*'s series directory.

        Broken or irrelevant entries are logged and skipped so one bad charm
        cannot break repository traversal.
        """
        path = os.path.join(self.path, collection.series)
        if not os.path.exists(path):
            return
        for dentry in os.listdir(path):
            if dentry.startswith("."):
                continue
            dentry_path = os.path.join(path, dentry)
            try:
                yield get_charm_from_path(dentry_path)
            except FileNotFound:
                continue
            # There is a broken charm in the repo, but that
            # shouldn't stop us from continuing
            except yaml.YAMLError as e:
                # Log yaml errors for feedback to developers.
                log.warning("Charm %r has a YAML error: %s", dentry, e)
                continue
            except (CharmError, ServiceConfigValueError) as e:
                # Log invalid config.yaml and metadata.yaml semantic errors
                log.warning("Charm %r has an error: %r %s", dentry, e, e)
                continue
            except CharmNotFound:
                # This could just be a random directory/file in the repo
                continue
            except Exception as e:
                # Catch all (perms, unknowns, etc)
                log.warning(
                    "Unexpected error while processing %s: %r",
                    dentry, e)

    def find(self, charm_url):
        """Find a charm with the given name.

        If multiple charms are found with different versions, the most
        recent one (greatest revision) will be returned.

        :raises CharmNotFound: if no matching charm exists.
        """
        assert charm_url.collection.schema == "local", "schema mismatch"
        latest = None
        for charm in self._collection(charm_url.collection):
            if charm.metadata.name == charm_url.name:
                if charm.get_revision() == charm_url.revision:
                    return charm
                if (latest is None or
                        latest.get_revision() < charm.get_revision()):
                    latest = charm
        if latest is None or charm_url.revision is not None:
            # BUG FIX: this previously *returned* the exception instance
            # instead of raising it, handing callers a truthy CharmNotFound
            # object rather than signalling an error.
            raise CharmNotFound(self.path, charm_url)
        return latest

    def latest(self, charm_url):
        """Return the greatest revision number available for *charm_url*."""
        # BUG FIX: the old implementation treated find()'s result as a
        # Twisted Deferred and called addCallback() on it, which always
        # raised AttributeError; return the revision directly instead.
        return self.find(charm_url.with_revision(None)).get_revision()

    def __str__(self):
        return "local charm repository: %s" % self.path
def resolve(vague_name, repository_path, default_series):
    """Get a charm repository and the charm URL it should be searched for

    :param str vague_name: a lazily specified charm name, suitable for use with
        :meth:`CharmURL.infer`

    :param repository_path: where on the local filesystem to find a repository
        (only currently meaningful when `charm_name` is specified with
        `"local:"`)
    :type repository_path: str or None

    :param str default_series: the Ubuntu series to insert when `charm_name` is
        inadequately specified.

    :return: a tuple of a :class:`LocalCharmRepository` and a
        :class:`tribus.common.charms.url.CharmURL`, which together contain
        all information necessary to locate the charm's data and specify
        its source.
    """
    url = CharmURL.infer(vague_name, default_series)
    repo = LocalCharmRepository(repository_path)
    return repo, url
| gpl-3.0 |
AshivDhondea/SORADSIM | scenarios/main_057_iss_14.py | 1 | 9993 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 01 14:15:39 2017
@author: Ashiv Dhondea
"""
import AstroFunctions as AstFn
import TimeHandlingFunctions as THF
import math
import numpy as np
# Importing what's needed for nice plots.
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Helvetica']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params)
from mpl_toolkits.basemap import Basemap
# Libraries needed for time keeping and formatting
import datetime as dt
import pytz
import aniso8601
import pandas as pd # for loading MeerKAT dishes' latlon
# --------------------------------------------------------------------------- #
print 'Loading MeerKAT positions'
# Dish layout spreadsheet: the first 64 rows hold the ID and WGS84
# latitude/longitude of each MeerKAT dish.
dframe = pd.read_excel("MeerKAT64v36.wgs84.64x4_edited.xlsx",sheetname="Sheet1")
dframe = dframe.reset_index()
meerkat_id = dframe['ID'][0:64]
meerkat_lat = dframe['Lat'][0:64]
meerkat_lon = dframe['Lon'][0:64]
# --------------------------------------------------------------------------- #
print 'Sorting out lat lon'
# Location of MeerKAT
# Positions of the first three dishes [deg]; dish 00 is used as the Rx
# reference site in the plots below.
lat_meerkat_00 = float(meerkat_lat[0]);
lon_meerkat_00 = float(meerkat_lon[0]);
altitude_meerkat = 1.038; # [km]

lat_meerkat_01 = float(meerkat_lat[1]);
lon_meerkat_01 = float(meerkat_lon[1]);

lat_meerkat_02 = float(meerkat_lat[2]);
lon_meerkat_02 = float(meerkat_lon[2]);

# Location of Denel Bredasdorp (the transmitter site)
lat_denel = -34.6; # [deg]
lon_denel = 20.316666666666666; # [deg]
altitude_denel = 0.018;#[km]
# --------------------------------------------------------------------------- #
# Recover the Tx visibility interval written by an earlier processing step.
with open('main_057_iss_00_visibility.txt') as fp:
    for line in fp:
        if 'visibility interval in Tx' in line:
            # Line format: "... = <start>/<end>"; drop the trailing newline.
            good_index = line.index('=')
            visibility_interval = line[good_index+1:-1];
            good_index = visibility_interval.index('/');
            start_timestring=visibility_interval[:good_index];
            end_timestring = visibility_interval[good_index+1:];
# NOTE(review): redundant -- the with-block has already closed fp.
fp.close();
# --------------------------------------------------------------------------- #
print 'Loading data'
timevec = np.load('main_057_iss_02_timevec.npy'); # timevector
lat_sgp4 = np.load('main_057_iss_02_lat_sgp4.npy'); # ground-track latitude [rad]
lon_sgp4 = np.load('main_057_iss_02_lon_sgp4.npy'); # ground-track longitude [rad]
# discretization step length/PRF
delta_t = timevec[2]-timevec[1];
# time stamps
experiment_timestamps = [None]*len(timevec)
index=0;
with open('main_057_iss_02_experiment_timestamps.txt') as fp:
    for line in fp:
        # One ISO-8601 timestamp per line; strip the trailing newline.
        modified_timestring = line[:-1];
        experiment_timestamps[index] = aniso8601.parse_datetime(modified_timestring);
        index+=1;
# NOTE(review): redundant -- the with-block has already closed fp.
fp.close();
# Make the first/last stamps timezone-naive for the arithmetic below.
experiment_timestamps[0] = experiment_timestamps[0].replace(tzinfo=None)
experiment_timestamps[-1] = experiment_timestamps[-1].replace(tzinfo=None)
title_string = str(experiment_timestamps[0].isoformat())+'/'+str(experiment_timestamps[-1].isoformat());

norad_id = '25544'  # NORAD catalogue number of the object (ISS)
# --------------------------------------------------------------------------- #
# Indices into timevec marking where the object enters/leaves the Tx
# elevation field of regard.
time_index = np.load('main_057_iss_02_time_index.npy');
tx_el_min_index = time_index[0];
tx_el_max_index = time_index[1];
print 'Tx FoV limits %d and %d' %(tx_el_min_index,tx_el_max_index)
# --------------------------------------------------------------------------- #
# Best Tx beam pointing indices: [lower beam edge, max dwell, upper beam edge].
tx_beam_indices_best = np.load('main_057_iss_04_tx_beam_indices_best.npy');
tx_beam_index_down = tx_beam_indices_best[0];
tx_bw_time_max = tx_beam_indices_best[1];
tx_beam_index_up = tx_beam_indices_best[2];

# Ground-track coordinates at those indices, converted to degrees for
# plotting (lat_sgp4/lon_sgp4 are in radians).
tx_beam_index_down_lat = math.degrees(lat_sgp4[tx_beam_index_down]);
tx_beam_index_down_lon = math.degrees(lon_sgp4[tx_beam_index_down]);
tx_beam_index_up_lat = math.degrees(lat_sgp4[tx_beam_index_up]);
tx_beam_index_up_lon = math.degrees(lon_sgp4[tx_beam_index_up]);

tx_el_min_index_lat = math.degrees(lat_sgp4[tx_el_min_index]);
tx_el_min_index_lon = math.degrees(lon_sgp4[tx_el_min_index]);
tx_el_max_index_lat = math.degrees(lat_sgp4[tx_el_max_index]);
tx_el_max_index_lon = math.degrees(lon_sgp4[tx_el_max_index]);
# --------------------------------------------------------------------------- #
print 'Finding the relevant epochs'
# Convert the sample indices to datetime epochs, then strip the timezone
# so they can be formatted/compared uniformly.
start_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,0,experiment_timestamps[0]);
end_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,len(timevec)-1,experiment_timestamps[0]);
tx_el_min_index_test = THF.fnCalculate_DatetimeEpoch(timevec,tx_el_min_index,experiment_timestamps[0]);
tx_el_max_index_test = THF.fnCalculate_DatetimeEpoch(timevec,tx_el_max_index,experiment_timestamps[0]);

end_epoch_test = end_epoch_test.replace(tzinfo=None);
start_epoch_test = start_epoch_test.replace(tzinfo=None);
tx_el_min_index_test = tx_el_min_index_test.replace(tzinfo=None);
tx_el_max_index_test = tx_el_max_index_test.replace(tzinfo=None);

tx_beam_index_down_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_down,experiment_timestamps[0]);
tx_beam_index_up_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_up,experiment_timestamps[0]);
tx_bw_time_max_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_bw_time_max,experiment_timestamps[0]);

tx_beam_index_down_epoch = tx_beam_index_down_epoch.replace(tzinfo=None);
tx_beam_index_up_epoch = tx_beam_index_up_epoch.replace(tzinfo=None);
tx_bw_time_max_epoch = tx_bw_time_max_epoch .replace(tzinfo=None);

# NOTE(review): title_string is recomputed here, overwriting the value set
# right after loading the timestamps.
title_string = str(start_epoch_test.isoformat())+'Z/'+str(end_epoch_test .isoformat())+'Z';
# --------------------------------------------------------------------------- #
print 'plotting results'
# ---- Figure 1: ground track with Tx FoR and beam-edge great circles ------- #
fig = plt.figure(1);ax = fig.gca();
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
plt.rc('font',family='helvetica');
params = {'legend.fontsize': 8,
          'legend.handlelength': 2}
plt.rcParams.update(params)

# Cassini projection centred on southern Africa.
# NOTE(review): local name ``map`` shadows the builtin.
map = Basemap(llcrnrlon=10.0,llcrnrlat=-52.0,urcrnrlon=37.,urcrnrlat=-11.,resolution='i', projection='cass', lat_0 = 0.0, lon_0 = 0.0)
map.drawcoastlines()
map.drawcountries()

# Object ground track (converted from radians to degrees).
lon =np.rad2deg(lon_sgp4);
lat = np.rad2deg(lat_sgp4);

x,y = map(lon, lat)
map.plot(x, y, color="blue", latlon=False,linewidth=1.3)

# Great circles from the Tx site to the FoR limits (crimson) and to the
# chosen beam edges (green).
map.drawgreatcircle(tx_el_min_index_lon,tx_el_min_index_lat,lon_denel,lat_denel,linewidth=1,color='crimson')
map.drawgreatcircle(tx_el_max_index_lon,tx_el_max_index_lat,lon_denel,lat_denel,linewidth=1,color='crimson')
map.drawgreatcircle(tx_beam_index_down_lon,tx_beam_index_down_lat,lon_denel,lat_denel,linewidth=1,color='green')
map.drawgreatcircle(tx_beam_index_up_lon,tx_beam_index_up_lat,lon_denel,lat_denel,linewidth=1,color='green')

x,y = map(lon_denel,lat_denel)
map.plot(x,y,marker='o',color='green'); # Denel Bredasdorp lat lon
x2,y2 = map(19.1,-36.2)
plt.annotate(r"\textbf{Tx}", xy=(x2, y2),color='green')

x,y = map(lon_meerkat_00,lat_meerkat_00)
map.plot(x,y,marker='o',color='blue'); # rx lat lon
x2,y2 = map(22,-30)
plt.annotate(r"\textbf{Rx}", xy=(x2, y2),color='blue')

parallels = np.arange(-81.,0.,5.)
# labels = [left,right,top,bottom]
map.drawparallels(parallels,labels=[False,True,False,False],labelstyle='+/-',linewidth=0.2)
meridians = np.arange(10.,351.,10.)
map.drawmeridians(meridians,labels=[True,False,False,True],labelstyle='+/-',linewidth=0.2)

plt.title(r'\textbf{Object %s trajectory during the interval %s}' %(norad_id,title_string), fontsize=12)
#plt.legend(loc='upper right',title=r"Transit through Tx FoV");
#ax.get_legend().get_title().set_fontsize('10')
fig.savefig('main_057_iss_14_map.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
# --------------------------------------------------------------------------- #
# ---- Figure 2: same track with FoR entry/exit markers and a legend -------- #
fig = plt.figure(2);ax = fig.gca();
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
plt.rc('font',family='helvetica');
params = {'legend.fontsize': 8,
          'legend.handlelength': 2}
plt.rcParams.update(params)

map = Basemap(llcrnrlon=10.0,llcrnrlat=-52.0,urcrnrlon=37.,urcrnrlat=-11.,resolution='i', projection='cass', lat_0 = 0.0, lon_0 = 0.0)
map.drawcoastlines()
map.drawcountries()

lon =np.rad2deg(lon_sgp4);
lat = np.rad2deg(lat_sgp4);

x,y = map(lon, lat)
map.plot(x, y, color="blue", latlon=False,linewidth=1.3)

map.drawgreatcircle(tx_el_min_index_lon,tx_el_min_index_lat,lon_denel,lat_denel,linewidth=1,color='crimson')
map.drawgreatcircle(tx_el_max_index_lon,tx_el_max_index_lat,lon_denel,lat_denel,linewidth=1,color='crimson')

# Markers at the instants the object enters/leaves the Tx field of regard,
# labelled with their (timezone-naive) epochs.
x,y = map(tx_el_min_index_lon,tx_el_min_index_lat)
map.scatter(x,y,marker='*',color='orangered',label=r"%s" %str(tx_el_min_index_test.isoformat()+'Z'));
x,y = map(tx_el_max_index_lon,tx_el_max_index_lat)
map.scatter(x,y,marker='>',color='purple',label=r"%s" %str(tx_el_max_index_test.isoformat()+'Z'));

#map.drawgreatcircle(tx_beam_index_down_lon,tx_beam_index_down_lat,lon_denel,lat_denel,linewidth=1,color='green')
#map.drawgreatcircle(tx_beam_index_up_lon,tx_beam_index_up_lat,lon_denel,lat_denel,linewidth=1,color='green')

x,y = map(lon_denel,lat_denel)
map.scatter(x,y,marker='o',color='green'); # Denel Bredasdorp lat lon
x2,y2 = map(19.1,-36.2)
plt.annotate(r"\textbf{Tx}", xy=(x2, y2),color='green')

x,y = map(lon_meerkat_00,lat_meerkat_00)
map.scatter(x,y,marker='o',color='blue'); # rx lat lon
x2,y2 = map(22,-30)
plt.annotate(r"\textbf{Rx}", xy=(x2, y2),color='blue')

parallels = np.arange(-81.,0.,5.)
# labels = [left,right,top,bottom]
map.drawparallels(parallels,labels=[False,True,False,False],labelstyle='+/-',linewidth=0.2)
meridians = np.arange(10.,351.,10.)
map.drawmeridians(meridians,labels=[True,False,False,True],labelstyle='+/-',linewidth=0.2)

plt.title(r'\textbf{Object %s trajectory during the interval %s}' %(norad_id,title_string), fontsize=12)
plt.legend(loc='upper right',title=r"\textbf{Transit through Tx FoR}");
ax.get_legend().get_title().set_fontsize('11')
fig.savefig('main_057_iss_14_map2.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
Befera/portfolio | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
# Matches a double quote together with the run of backslashes immediately
# preceding it; used when escaping arguments for CommandLineToArgvW.
windows_quoter_regex = re.compile(r'(\\*)"')


def QuoteForRspFile(arg):
  """Quote a command line argument so that it appears as one argument when
  processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
  Windows programs)."""
  # These are CommandLineToArgvW's rules, not the shell's, because cmd.exe
  # does no interpretation of rsp-file contents (see http://goo.gl/cuFbX and
  # http://goo.gl/dhPnp): a literal quote needs 2n+1 preceding backslashes,
  # which parse back to n backslashes plus the quote.  So double each run of
  # backslashes that precedes a quote and escape the quote itself.
  def _escape_quote(match):
    backslashes = match.group(1)
    return backslashes * 2 + '\\"'

  escaped = windows_quoter_regex.sub(_escape_quote, arg)

  # '%' is special to batch processing (positional args / %VAR% expansion);
  # double it so the literal character survives and e.g. %PATH% stays text.
  escaped = escaped.replace('%', '%%')

  # Wrap the whole thing in quotes so the rule above applies and embedded
  # whitespace does not split the argument.  No '^' shell escaping is needed
  # since these strings go into rsp files.
  return '"%s"' % escaped
def EncodeRspFileList(args):
  """Process a list of arguments using QuoteCmdExeArgument."""
  # An empty command encodes to an empty string.
  if not args:
    return ''
  # The first element is the command itself; leave it unquoted so shell
  # built-ins like 'echo' keep working.  For 'call prog' only the program
  # part is normpath'd: normalizing the whole string would incorrectly
  # treat "call ../x.bat" as a single path.
  first = args[0]
  if first.startswith('call '):
    call, prog = first.split(' ', 1)
    program = call + ' ' + os.path.normpath(prog)
  else:
    program = os.path.normpath(first)
  return program + ' ' + ' '.join(QuoteForRspFile(a) for a in args[1:])
def _GenericRetrieve(root, default, path):
  """Given a list of dictionary keys |path| and a tree of dicts |root|, find
  value at path, or return |default| if any of the path doesn't exist."""
  # Iterative walk (the original recursion unrolled): descend one key at a
  # time, bailing out to |default| the moment the current node is falsy.
  node = root
  for key in path:
    if not node:
      return default
    node = node.get(key)
  # Mirror the recursive base case: a falsy final node also yields default.
  if not node:
    return default
  return node
def _AddPrefix(element, prefix):
  """Add |prefix| to |element| or each subelement if element is iterable."""
  if element is None:
    return None
  # Strings are technically iterable but must be treated as scalars, so
  # only list and tuple get per-item treatment.
  if isinstance(element, (list, tuple)):
    return [prefix + item for item in element]
  return prefix + element
def _DoRemapping(element, map):
  """If |element| then remap it through |map|. If |element| is iterable then
  each item will be remapped. Any elements not found will be removed.

  |map| may be a dict (its .get is used) or any callable; it keeps the name
  'map' (shadowing the builtin) because call sites pass it by keyword.
  """
  if map is not None and element is not None:
    if not callable(map):
      map = map.get  # Assume it's a dict, otherwise a callable to do the remap.
    if isinstance(element, (list, tuple)):
      # BUG FIX (py3 compat): filter() returns a lazy iterator on Python 3,
      # which callers could not index or re-iterate; build a real list of
      # the truthy remapped values instead (identical result on Python 2).
      element = [mapped for mapped in (map(elem) for elem in element)
                 if mapped]
    else:
      element = map(element)
  return element
def _AppendOrReturn(append, element):
  """If |append| is None, simply return |element|. If |append| is not None,
  then add |element| to it, adding each item in |element| if it's a list or
  tuple."""
  # Guard clause: nothing to accumulate into, or nothing to accumulate.
  if append is None or element is None:
    return element
  if isinstance(element, (list, tuple)):
    append.extend(element)
  else:
    append.append(element)
def _FindDirectXInstallation():
  """Try to find an installation location for the DirectX SDK. Check for the
  standard environment variable, and if that doesn't exist, try to find
  via the registry. May return None if not found in either location."""
  # Return previously calculated value, if there is one
  # (memoized as an attribute on the function object itself).
  if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
    return _FindDirectXInstallation.dxsdk_dir

  dxsdk_dir = os.environ.get('DXSDK_DIR')
  if not dxsdk_dir:
    # Setup params to pass to and attempt to launch reg.exe.
    cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    for line in p.communicate()[0].splitlines():
      if 'InstallPath' in line:
        # reg.exe output format: "InstallPath    REG_SZ    <path>".
        dxsdk_dir = line.split(' ')[3] + "\\"

  # Cache return value
  _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
  return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
  """Get a dict of variables mapping internal VS macro names to their gyp
  equivalents. Returns all variables that are independent of the target."""
  env = {}
  # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
  # Visual Studio is actually installed.
  if vs_version.Path():
    env['$(VSInstallDir)'] = vs_version.Path()
    env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
  # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
  # set. This happens when the SDK is sync'd via src-internal, rather than
  # by typical end-user installation of the SDK. If it's not set, we don't
  # want to leave the unexpanded variable in the path, so simply strip it.
  dxsdk_dir = _FindDirectXInstallation()
  env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
  # Try to find an installation location for the Windows DDK by checking
  # the WDK_DIR environment variable, may be None.
  env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
  return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
  """Finds msvs_system_include_dirs that are common to all targets, removes
  them from all targets, and returns an OrderedSet containing them."""
  # Intersect the (order-preserving) sets of system include dirs across
  # every configuration.
  all_system_includes = OrderedSet(
      configs[0].get('msvs_system_include_dirs', []))
  for config in configs[1:]:
    system_includes = config.get('msvs_system_include_dirs', [])
    all_system_includes = all_system_includes & OrderedSet(system_includes)
  if not all_system_includes:
    return None
  # Expand macros in all_system_includes.
  env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
  expanded_system_includes = OrderedSet([ExpandMacros(include, env)
                                         for include in all_system_includes])
  if any(['$' in include for include in expanded_system_includes]):
    # Some path relies on target-specific variables, bail.
    return None

  # Remove system includes shared by all targets from the targets.
  for config in configs:
    includes = config.get('msvs_system_include_dirs', [])
    if includes:  # Don't insert a msvs_system_include_dirs key if not needed.
      # This must check the unexpanded includes list:
      new_includes = [i for i in includes if i not in all_system_includes]
      config['msvs_system_include_dirs'] = new_includes

  return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correpond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
  """Cache per-configuration msvs_* settings for |spec|.

  Each supported 'msvs_...' field becomes an attribute mapping
  configuration name -> value (with a type-appropriate default), so later
  queries never consult the raw spec again.  Raises for fields this ninja
  emulation cannot honour (msvs_prebuild / msvs_postbuild).
  """
  self.spec = spec
  self.vs_version = GetVSVersion(generator_flags)

  supported_fields = [
      ('msvs_configuration_attributes', dict),
      ('msvs_settings', dict),
      ('msvs_system_include_dirs', list),
      ('msvs_disabled_warnings', list),
      ('msvs_precompiled_header', str),
      ('msvs_precompiled_source', str),
      ('msvs_configuration_platform', str),
      ('msvs_target_platform', str),
      ]
  configs = spec['configurations']
  for field, default in supported_fields:
    setattr(self, field, {})
    for configname, config in configs.iteritems():
      getattr(self, field)[configname] = config.get(field, default())

  self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])

  unsupported_fields = [
      'msvs_prebuild',
      'msvs_postbuild',
  ]
  unsupported = []
  for field in unsupported_fields:
    for config in configs.values():
      if field in config:
        unsupported += ["%s not supported (target %s)." %
                        (field, spec['target_name'])]
  if unsupported:
    raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
  """Get a dict of variables mapping internal VS macro names to their gyp
  equivalents."""
  target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
  target_name = self.spec.get('product_prefix', '') + \
      self.spec.get('product_name', self.spec['target_name'])
  target_dir = base_to_build + '\\' if base_to_build else ''
  target_ext = '.' + self.GetExtension()
  target_file_name = target_name + target_ext

  replacements = {
      '$(InputName)': '${root}',
      '$(InputPath)': '${source}',
      '$(IntDir)': '$!INTERMEDIATE_DIR',
      '$(OutDir)\\': target_dir,
      '$(PlatformName)': target_platform,
      '$(ProjectDir)\\': '',
      '$(ProjectName)': self.spec['target_name'],
      '$(TargetDir)\\': target_dir,
      '$(TargetExt)': target_ext,
      '$(TargetFileName)': target_file_name,
      '$(TargetName)': target_name,
      '$(TargetPath)': os.path.join(target_dir, target_file_name),
  }
  # Add the target-independent macros ($(VSInstallDir), $(DXSDK_DIR), ...).
  replacements.update(GetGlobalVSMacroEnv(self.vs_version))
  return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
  """Convert from VS macro names to something equivalent."""
  # Delegates to ExpandMacros with this target's macro environment.
  env = self.GetVSMacroEnv(base_to_build, config=config)
  return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
  """Retrieve a value from |field| at |path| or return |default|. If
  |append| is specified, and the item is found, it will be appended to that
  object instead of returned. If |map| is specified, results will be
  remapped through |map| before being returned or appended."""
  # Pipeline: raw lookup -> remap values -> add flag prefix -> append/return.
  result = _GenericRetrieve(field, default, path)
  result = _DoRemapping(result, map)
  result = _AddPrefix(result, prefix)
  return _AppendOrReturn(append, result)
class _GetWrapper(object):
  """Callable binding a settings dict and tool name so call sites can
  write wrapper('SettingName', map=..., prefix=...)."""

  def __init__(self, parent, field, base_path, append=None):
    self.parent = parent          # owning MsvsSettings instance
    self.field = field            # settings dict to search
    self.base_path = [base_path]  # e.g. ['VCCLCompilerTool']
    self.append = append          # optional list collecting results

  def __call__(self, name, map=None, prefix='', default=None):
    return self.parent._GetAndMunge(self.field, self.base_path + [name],
                                    default=default, prefix=prefix,
                                    append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
             default=None, prefix='', append=None, map=None):
  """_GetAndMunge for msvs_settings."""
  # Convenience wrapper: looks up |path| inside this config's
  # 'msvs_settings' dict.
  return self._GetAndMunge(
      self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
                  default=None, prefix='', append=None, map=None):
  """_GetAndMunge for msvs_configuration_attributes."""
  # Convenience wrapper: looks up |path| inside this config's
  # 'msvs_configuration_attributes' dict.
  return self._GetAndMunge(
      self.msvs_configuration_attributes[config],
      path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
  """Updates include_dirs to expand VS specific paths, and adds the system
  include dirs used for platform SDK and similar."""
  config = self._TargetConfig(config)
  includes = include_dirs + self.msvs_system_include_dirs[config]
  includes.extend(self._Setting(
      ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
  # Expand $(...) macros in every path before handing them to the build.
  return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
  """Updates midl_include_dirs to expand VS specific paths, and adds the
  system include dirs used for platform SDK and similar."""
  config = self._TargetConfig(config)
  includes = midl_include_dirs + self.msvs_system_include_dirs[config]
  includes.extend(self._Setting(
      ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
  # Expand $(...) macros in every path before handing them to the build.
  return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
  """Returns the set of defines that are injected to the defines list based
  on other VS settings."""
  config = self._TargetConfig(config)
  defines = []
  # CharacterSet is VS's numeric encoding: '1' == Unicode, '2' == MBCS.
  if self._ConfigAttrib(['CharacterSet'], config) == '1':
    defines.extend(('_UNICODE', 'UNICODE'))
  if self._ConfigAttrib(['CharacterSet'], config) == '2':
    defines.append('_MBCS')
  defines.extend(self._Setting(
      ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
  return defines
def GetCompilerPdbName(self, config, expand_special):
  """Get the pdb file name that should be used for compiler invocations, or
  None if there's no explicit name specified."""
  config = self._TargetConfig(config)
  pdbname = self._Setting(
      ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
  if pdbname:
    # Expand $(...) macros and ninja special variables in the name.
    pdbname = expand_special(self.ConvertVSMacros(pdbname))
  return pdbname
def GetMapFileName(self, config, expand_special):
  """Gets the explicitly overriden map file name for a target or returns None
  if it's not set."""
  config = self._TargetConfig(config)
  map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
  if map_file:
    # Expand $(...) macros and ninja special variables in the name.
    map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
  return map_file
def GetOutputName(self, config, expand_special):
  """Gets the explicitly overridden output name for a target or returns None
  if it's not overridden."""
  config = self._TargetConfig(config)
  # Static libraries are produced by the librarian tool; everything else
  # (executables, shared libraries) by the linker.
  target_type = self.spec['type']  # renamed from 'type': don't shadow the builtin
  root = 'VCLibrarianTool' if target_type == 'static_library' else 'VCLinkerTool'
  # TODO(scottmg): Handle OutputDirectory without OutputFile.
  output_file = self._Setting((root, 'OutputFile'), config)
  if output_file:
    output_file = expand_special(self.ConvertVSMacros(
        output_file, config=config))
  return output_file
def GetPDBName(self, config, expand_special, default):
  """Gets the explicitly overridden pdb name for a target or returns
  default if it's not overridden, or if no pdb will be generated."""
  config = self._TargetConfig(config)
  output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
  generate_debug_info = self._Setting(
      ('VCLinkerTool', 'GenerateDebugInformation'), config)
  if generate_debug_info == 'true':
    if output_file:
      return expand_special(self.ConvertVSMacros(output_file, config=config))
    else:
      return default
  else:
    # Debug info disabled => no PDB at all.
    return None
def GetNoImportLibrary(self, config):
  """If NoImportLibrary: true, ninja will not expect the output to include
  an import library."""
  # The setting is stored as the string 'true'/'false', not a bool.
  config = self._TargetConfig(config)
  return self._Setting(('NoImportLibrary',), config) == 'true'
def GetAsmflags(self, config):
  """Returns the flags that need to be added to ml invocations."""
  config = self._TargetConfig(config)
  flags = []
  # /safeseh marks the object as containing only registered exception
  # handlers; only emitted when explicitly requested.
  if self._Setting(('MASM', 'UseSafeExceptionHandlers'), config) == 'true':
    flags.append('/safeseh')
  return flags
def GetCflags(self, config):
  """Returns the flags that need to be added to .c and .cc compilations."""
  config = self._TargetConfig(config)
  cflags = []
  # Warnings disabled via msvs_disabled_warnings become /wdNNNN.
  cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
  cl = self._GetWrapper(self, self.msvs_settings[config],
                        'VCCLCompilerTool', append=cflags)
  # Each cl(...) call maps one VCCLCompilerTool project setting to the
  # corresponding cl.exe flag, appending it to |cflags|.
  cl('Optimization',
     map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
  cl('InlineFunctionExpansion', prefix='/Ob')
  cl('DisableSpecificWarnings', prefix='/wd')
  cl('StringPooling', map={'true': '/GF'})
  cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
  cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
  cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
  cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
  cl('FloatingPointModel',
     map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
     default='0')
  cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
  cl('WholeProgramOptimization', map={'true': '/GL'})
  cl('WarningLevel', prefix='/W')
  cl('WarnAsError', map={'true': '/WX'})
  cl('CallingConvention',
     map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
  cl('DebugInformationFormat',
     map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
  cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
  cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
  cl('MinimalRebuild', map={'true': '/Gm'})
  cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
  cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
  cl('RuntimeLibrary',
     map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
  cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
  cl('DefaultCharIsUnsigned', map={'true': '/J'})
  cl('TreatWChar_tAsBuiltInType',
     map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
  cl('EnablePREfast', map={'true': '/analyze'})
  cl('AdditionalOptions', prefix='')
  cl('EnableEnhancedInstructionSet',
     map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
     prefix='/arch:')
  # Force-included headers become /FI<file>.
  cflags.extend(['/FI' + f for f in self._Setting(
      ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
  if self.vs_version.short_name in ('2013', '2013e', '2015'):
    # New flag required in 2013 to maintain previous PDB behavior.
    cflags.append('/FS')
  # ninja handles parallelism by itself, don't have the compiler do it too.
  # NOTE(review): filter() here returns a list on Python 2 (this module's
  # target) but a lazy iterator on Python 3.
  cflags = filter(lambda x: not x.startswith('/MP'), cflags)
  return cflags
def _GetPchFlags(self, config, extension):
    """Get the flags to be added to the cflags for precompiled header support.
    """
    config = self._TargetConfig(config)
    # The PCH is only built once by a particular source file. Usage of PCH must
    # only be for the same language (i.e. C vs. C++), so only include the pch
    # flags when the language matches.
    header = self.msvs_precompiled_header[config]
    if not header:
        return []
    source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
    if not _LanguageMatchesForPch(source_ext, extension):
        return []
    pch = os.path.split(header)[1]
    return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
def GetCflagsC(self, config):
    """Returns the flags that need to be added to .c compilations."""
    return self._GetPchFlags(self._TargetConfig(config), '.c')
def GetCflagsCC(self, config):
    """Returns the flags that need to be added to .cc compilations."""
    # /TP forces compilation of every input as C++.
    return ['/TP'] + self._GetPchFlags(self._TargetConfig(config), '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
    """Returns the flags that need to be added to lib commands.

    Arguments:
      config: the target configuration name to read settings from.
      gyp_to_build_path: function converting gyp-relative paths to
          build-directory-relative paths.
    """
    config = self._TargetConfig(config)
    libflags = []
    # lib(...) looks up a VCLibrarianTool setting and appends the mapped
    # flag(s) onto libflags (see _GetWrapper's append= argument).
    lib = self._GetWrapper(self, self.msvs_settings[config],
                           'VCLibrarianTool', append=libflags)
    libflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLibrarianTool', config, gyp_to_build_path))
    lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
    # VS stores TargetMachine as a numeric enum; map it to /MACHINE: names.
    lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
        prefix='/MACHINE:')
    lib('AdditionalOptions')
    return libflags
def GetDefFile(self, gyp_to_build_path):
    """Returns the .def file from sources, if any. Otherwise returns None."""
    spec = self.spec
    if spec['type'] not in ('shared_library', 'loadable_module', 'executable'):
        return None
    def_files = [source for source in spec.get('sources', [])
                 if source.endswith('.def')]
    if len(def_files) > 1:
        raise Exception("Multiple .def files")
    if def_files:
        return gyp_to_build_path(def_files[0])
    return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
    """Gets the explicitly overridden pgd name for a target or returns None
    if it's not overridden."""
    config = self._TargetConfig(config)
    output_file = self._Setting(
        ('VCLinkerTool', 'ProfileGuidedDatabase'), config)
    if not output_file:
        return output_file
    return expand_special(self.ConvertVSMacros(output_file, config=config))
def GetLdflags(self, config, gyp_to_build_path, expand_special,
               manifest_base_name, output_name, is_executable, build_dir):
    """Returns the flags that need to be added to link commands, and the
    manifest files.

    Arguments:
      config: target configuration name.
      gyp_to_build_path: function converting gyp-relative paths to
          build-directory-relative paths.
      expand_special: function expanding special ninja variables.
      manifest_base_name: base name for the intermediate manifest.
      output_name: name of the linked output (used for the default .pdb name).
      is_executable: whether the target links to an executable.
      build_dir: the build output directory.
    """
    config = self._TargetConfig(config)
    ldflags = []
    # ld(...) looks up a VCLinkerTool setting and appends the mapped flag(s)
    # onto ldflags (see _GetWrapper's append= argument).
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
       prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    ld('TreatLinkerWarningAsErrors', prefix='/WX',
       map={'true': '', 'false': ':NO'})
    out = self.GetOutputName(config, expand_special)
    if out:
        ldflags.append('/OUT:' + out)
    pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
    if pdb:
        ldflags.append('/PDB:' + pdb)
    pgd = self.GetPGDName(config, expand_special)
    if pgd:
        ldflags.append('/PGD:' + pgd)
    map_file = self.GetMapFileName(config, expand_special)
    ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
       else '/MAP'})
    ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
    ld('AdditionalOptions', prefix='')
    minimum_required_version = self._Setting(
        ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
    if minimum_required_version:
        minimum_required_version = ',' + minimum_required_version
    ld('SubSystem',
       map={'1': 'CONSOLE%s' % minimum_required_version,
            '2': 'WINDOWS%s' % minimum_required_version},
       prefix='/SUBSYSTEM:')
    stack_reserve_size = self._Setting(
        ('VCLinkerTool', 'StackReserveSize'), config, default='')
    if stack_reserve_size:
        stack_commit_size = self._Setting(
            ('VCLinkerTool', 'StackCommitSize'), config, default='')
        if stack_commit_size:
            stack_commit_size = ',' + stack_commit_size
        ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
    ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('BaseAddress', prefix='/BASE:')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
       map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
       map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('ForceSymbolReferences', prefix='/INCLUDE:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration',
       map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
            '4': ':PGUPDATE'},
       prefix='/LTCG')
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    ld('Profile', map={'true': '/PROFILE'})
    ld('LargeAddressAware',
       map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')
    # /SAFESEH is only meaningful for x86 links.
    if self.GetArch(config) == 'x86':
        safeseh_default = 'true'
    else:
        safeseh_default = None
    ld('ImageHasSafeExceptionHandlers',
       map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
       default=safeseh_default)
    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    # FIX: these three checks previously used filter(), whose lazy (and
    # always-truthy) result object breaks the emptiness tests on Python 3;
    # any() is behaviorally identical on Python 2 and correct on Python 3.
    if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags):
        ldflags.append('/DYNAMICBASE')
    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not any('NXCOMPAT' in flag for flag in ldflags):
        ldflags.append('/NXCOMPAT')
    have_def_file = any(flag.startswith('/DEF:') for flag in ldflags)
    manifest_flags, intermediate_manifest, manifest_files = \
        self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
                                 is_executable and not have_def_file, build_dir)
    ldflags.extend(manifest_flags)
    return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
                        allow_isolation, build_dir):
    """Returns a 3-tuple:
    - the set of flags that need to be added to the link to generate
      a default manifest
    - the intermediate manifest that the linker will generate that should be
      used to assert it doesn't add anything to the merged one.
    - the list of all the manifest files to be merged by the manifest tool and
      included into the link."""
    generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
                                      config,
                                      default='true')
    if generate_manifest != 'true':
        # This means not only that the linker should not generate the intermediate
        # manifest but also that the manifest tool should do nothing even when
        # additional manifests are specified.
        return ['/MANIFEST:NO'], [], []
    output_name = name + '.intermediate.manifest'
    flags = [
        '/MANIFEST',
        '/ManifestFile:' + output_name,
    ]
    # Instead of using the MANIFESTUAC flags, we generate a .manifest to
    # include into the list of manifests. This allows us to avoid the need to
    # do two passes during linking. The /MANIFEST flag and /ManifestFile are
    # still used, and the intermediate manifest is used to assert that the
    # final manifest we get from merging all the additional manifest files
    # (plus the one we generate here) isn't modified by merging the
    # intermediate into it.
    # Always NO, because we generate a manifest file that has what we want.
    flags.append('/MANIFESTUAC:NO')
    # NOTE(review): |config| is only translated here, *after* the
    # GenerateManifest lookup above used the untranslated name — confirm
    # that is intentional.
    config = self._TargetConfig(config)
    enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
                               default='true')
    manifest_files = []
    # Outer XML wrapper for the generated manifest; %s receives the
    # (possibly empty) trustInfo block built below.
    generated_manifest_outer = \
        "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
        "<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
        "</assembly>"
    if enable_uac == 'true':
        execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
                                        config, default='0')
        # VS numeric enum -> requestedExecutionLevel attribute value.
        execution_level_map = {
            '0': 'asInvoker',
            '1': 'highestAvailable',
            '2': 'requireAdministrator'
        }
        ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
                                  default='false')
        inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
    else:
        inner = ''
    generated_manifest_contents = generated_manifest_outer % inner
    generated_name = name + '.generated.manifest'
    # Need to join with the build_dir here as we're writing it during
    # generation time, but we return the un-joined version because the build
    # will occur in that directory. We only write the file if the contents
    # have changed so that simply regenerating the project files doesn't
    # cause a relink.
    build_dir_generated_name = os.path.join(build_dir, generated_name)
    gyp.common.EnsureDirExists(build_dir_generated_name)
    # WriteOnDiff only touches the file when the contents differ (avoids
    # spurious relinks on regeneration).
    f = gyp.common.WriteOnDiff(build_dir_generated_name)
    f.write(generated_manifest_contents)
    f.close()
    manifest_files = [generated_name]
    if allow_isolation:
        flags.append('/ALLOWISOLATION')
    manifest_files += self._GetAdditionalManifestFiles(config,
                                                      gyp_to_build_path)
    return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
    """Returns whether the target should be linked via Use Library Dependency
    Inputs (using component .objs of a given .lib)."""
    config = self._TargetConfig(config)
    setting = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'),
                            config)
    return setting == 'true'
def IsEmbedManifest(self, config):
    """Returns whether manifest should be linked into binary."""
    config = self._TargetConfig(config)
    # Embedding defaults to on when the setting is absent.
    embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
                          default='true')
    return embed == 'true'
def IsLinkIncremental(self, config):
    """Returns whether the target should be linked incrementally."""
    config = self._TargetConfig(config)
    # '1' explicitly disables incremental linking; anything else (including
    # an unset value) leaves it enabled.
    return self._Setting(('VCLinkerTool', 'LinkIncremental'), config) != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
    """Returns the flags that need to be added to invocations of the resource
    compiler."""
    config = self._TargetConfig(config)
    rcflags = []
    # rc(...) looks up a VCResourceCompilerTool setting and appends the
    # mapped flag(s) onto rcflags (see _GetWrapper's append= argument).
    rc = self._GetWrapper(self, self.msvs_settings[config],
                          'VCResourceCompilerTool', append=rcflags)
    rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
    # The build directory's view of '.' is always an include path.
    rcflags.append('/I' + gyp_to_ninja_path('.'))
    rc('PreprocessorDefinitions', prefix='/d')
    # /l arg must be in hex without leading '0x'
    rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
    return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    def _quote(arg):
        # Posix-ify separators, escape embedded double quotes, then wrap in
        # single quotes (escaping any embedded single quote for bash).
        arg = arg.replace('\\', '/').replace('"', '\\"')
        return "'%s'" % arg.replace("'", "'\\''")

    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd_cmd = ('cd %s' % path_to_base).replace('\\', '/')
    bash_cmd = ' '.join(_quote(a) for a in args)
    return ('call "%s\\setup_env.bat" && set CYGWIN=nontsec && '
            'bash -c "%s ; %s"' % (cygwin_dir, cd_cmd, bash_cmd))
def IsRuleRunUnderCygwin(self, rule):
    """Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1 we use cygwin."""
    # The rule-level setting wins; fall back to the target spec, then to 1.
    fallback = self.spec.get('msvs_cygwin_shell', 1)
    return int(rule.get('msvs_cygwin_shell', fallback)) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
    """Determine if there's an explicit rule or action for idl files. When
    there isn't we need to generate implicit rules to build MIDL .idl files."""
    if self._HasExplicitRuleForExtension(spec, 'idl'):
        return True
    return self._HasExplicitIdlActions(spec)
def HasExplicitAsmRules(self, spec):
    """Determine if there's an explicit rule for asm files. When there isn't we
    need to generate implicit rules to assemble .asm files."""
    # Delegate to the generic per-extension rule lookup.
    asm_extension = 'asm'
    return self._HasExplicitRuleForExtension(spec, asm_extension)
def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required.

    NOTE(review): |source| is unused in this body — confirm whether callers
    rely on the parameter being accepted.
    """
    config = self._TargetConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')

    def midl(name, default=None):
        # Read a VCMIDLTool setting and expand any VS macros it contains.
        return self.ConvertVSMacros(midl_get(name, default=default),
                                    config=config)

    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
    flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
    return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
    """Helper to generate dependencies and build rules to handle generation of
    precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
    """

    def __init__(
        self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
        # settings is expected to expose msvs_precompiled_source and
        # msvs_precompiled_header maps keyed by config (see uses below).
        self.settings = settings
        self.config = config
        pch_source = self.settings.msvs_precompiled_source[self.config]
        self.pch_source = gyp_to_build_path(pch_source)
        filename, _ = os.path.splitext(pch_source)
        # NOTE(review): lower-cased, presumably to normalize object-path
        # comparisons — confirm.
        self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()

    def _PchHeader(self):
        """Get the header that will appear in an #include line for all source
        files."""
        return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]

    def GetObjDependencies(self, sources, objs, arch):
        """Given a list of sources files and the corresponding object files,
        returns a list of the pch files that should be depended upon. The
        additional wrapping in the return value is for interface compatibility
        with make.py on Mac, and xcode_emulation.py."""
        assert arch is None
        if not self._PchHeader():
            return []
        pch_ext = os.path.splitext(self.pch_source)[1]
        # Depend on the pch object only when at least one source shares the
        # pch source's language (C vs. C++).
        for source in sources:
            if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
                return [(None, None, self.output_obj)]
        return []

    def GetPchBuildCommands(self, arch):
        """Not used on Windows as there are no additional build steps required
        (instead, existing steps are modified in GetFlagsModifications below)."""
        return []

    def GetFlagsModifications(self, input, output, implicit, command,
                              cflags_c, cflags_cc, expand_special):
        """Get the modified cflags and implicit dependencies that should be used
        for the pch compilation step."""
        if input == self.pch_source:
            # This source creates the PCH (/Yc); its object output is
            # redirected to self.output_obj.
            pch_output = ['/Yc' + self._PchHeader()]
            if command == 'cxx':
                return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
                        self.output_obj, [])
            elif command == 'cc':
                return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
                        self.output_obj, [])
        return [], output, implicit
# Module-level cache for the selected Visual Studio version; populated
# lazily by GetVSVersion().
vs_version = None
def GetVSVersion(generator_flags):
    """Return the VisualStudioVersion selected by |generator_flags| (keyed by
    'msvs_version', defaulting to 'auto'), computing it on first use and
    caching the result in the module-level vs_version."""
    global vs_version
    if not vs_version:
        vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
            generator_flags.get('msvs_version', 'auto'),
            allow_fallback=False)
    return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
    """Return the setup-script command for the selected VS version.

    NOTE(review): |arch| is accepted but not forwarded to SetupScript() —
    confirm whether that is intentional.
    """
    return GetVSVersion(generator_flags).SetupScript()
def ExpandMacros(string, expansions):
    """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
    for the canonical way to retrieve a suitable dict.

    FIX: previously iterated with dict.iteritems(), which no longer exists on
    Python 3; .items() is behaviorally identical here on Python 2.
    """
    if '$' in string:
        for old, new in expansions.items():
            # Replacement values must not themselves contain macros, or the
            # result would depend on dict iteration order.
            assert '$(' not in new, new
            string = string.replace(old, new)
    return string
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
                             system_includes, open_out):
    """It's not sufficient to have the absolute path to the compiler, linker,
    etc. on Windows, as those tools rely on .dlls being in the PATH. We also
    need to support both x86 and x64 compilers within the same build (to support
    msvs_target_platform hackery). Different architectures require a different
    compiler binary, and different supporting environment variables (INCLUDE,
    LIB, LIBPATH). So, we extract the environment here, wrap all invocations
    of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
    sets up the environment, and then we do not prefix the compiler with
    an absolute path, instead preferring something like "cl.exe" in the rule
    which will then run whichever the environment setup has put in the path.
    When the following procedure to generate environment files does not
    meet your requirement (e.g. for custom toolchains), you can pass
    "-G ninja_use_custom_environment_files" to the gyp to suppress file
    generation and use custom environment files prepared by yourself.

    Returns a dict mapping arch name ('x86'/'x64') to the cl.exe path (or the
    bare 'cl.exe' when custom environment files are in use).
    """
    archs = ('x86', 'x64')
    if generator_flags.get('ninja_use_custom_environment_files', 0):
        # Caller supplies environment.<arch> files; just report bare cl.exe.
        cl_paths = {}
        for arch in archs:
            cl_paths[arch] = 'cl.exe'
        return cl_paths
    vs = GetVSVersion(generator_flags)
    cl_paths = {}
    for arch in archs:
        # Extract environment variables for subprocesses.
        args = vs.SetupScript(arch)
        args.extend(('&&', 'set'))
        popen = subprocess.Popen(
            args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        variables, _ = popen.communicate()
        env = _ExtractImportantEnvironment(variables)
        # Inject system includes from gyp files into INCLUDE.
        if system_includes:
            system_includes = system_includes | OrderedSet(
                env.get('INCLUDE', '').split(';'))
            env['INCLUDE'] = ';'.join(system_includes)
        # Persist the environment block for win_tool.py to load at build time.
        env_block = _FormatAsEnvironmentBlock(env)
        f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
        f.write(env_block)
        f.close()
        # Find cl.exe location for this architecture.  The cmd.exe `for`
        # substitution %~$PATH:i echoes the full PATH-resolved location.
        args = vs.SetupScript(arch)
        args.extend(('&&',
            'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
        popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
        output, _ = popen.communicate()
        cl_paths[arch] = _ExtractCLPath(output)
    return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
    """Emulate behavior of msvs_error_on_missing_sources present in the msvs
    generator: Check that all regular source files, i.e. not created at run
    time, exist on disk. Missing files cause needless recompilation when
    building via VS, and we want this check to match for people/bots that
    build using ninja, so they're not surprised when the VS build fails.

    Raises:
      Exception: listing the missing files, when the
          msvs_error_on_missing_sources generator flag is enabled.

    FIX: the emptiness/membership checks previously used filter(), whose
    lazy (always-truthy) result object breaks `if missing:` on Python 3;
    list comprehensions are behaviorally identical on Python 2.
    """
    if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
        # Sources containing '$' are generated at build time; skip them.
        no_specials = [x for x in sources if '$' not in x]
        relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
        missing = [x for x in relative if not os.path.exists(x)]
        if missing:
            # They'll look like out\Release\..\..\stuff\things.cc, so normalize
            # the path for a slightly less crazy looking output.
            cleaned_up = [os.path.normpath(x) for x in missing]
            raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
    """Populate default_variables with MSVS_VERSION and MSVS_OS_BITS."""
    generator_flags = params.get('generator_flags', {})
    # Set a variable so conditions can be based on msvs_version.
    version = gyp.msvs_emulation.GetVSVersion(generator_flags)
    default_variables['MSVS_VERSION'] = version.ShortName()
    # To determine processor word size on Windows, in addition to checking
    # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
    # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
    # contains the actual word size of the system when running thru WOW64).
    is_64bit = ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
                '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''))
    default_variables['MSVS_OS_BITS'] = 64 if is_64bit else 32
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/smtpd.py | 174 | 18543 | #! /usr/bin/env python
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <[email protected]>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
# Names exported by `from smtpd import *`.
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]

# Name this script was invoked as; interpolated into the usage message.
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
    """File-like sink that silently discards everything written to it."""

    def write(self, msg):
        # Deliberate no-op: debug output is disabled by default.
        pass

    def flush(self):
        pass
# Debug output sink; discards everything by default (see Devnull above).
# NOTE(review): presumably replaced with a real stream when debugging is
# enabled elsewhere in the module — confirm.
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
    """Print the module usage text (the module docstring, with program name
    and version interpolated) plus an optional extra message to stderr, then
    exit with |code|."""
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)
class SMTPChannel(asynchat.async_chat):
    """Async channel implementing the per-connection SMTP command state
    machine (RFC 821 subset)."""

    # Parser states: reading command lines vs. accumulating message data.
    COMMAND = 0
    DATA = 1

    def __init__(self, server, conn, addr):
        """Wrap an accepted connection and greet the peer with a 220 banner."""
        asynchat.async_chat.__init__(self, conn)
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []             # buffered chunks of the current line
        self.__state = self.COMMAND
        self.__greeting = 0          # HELO argument once the client greets us
        self.__mailfrom = None       # sender from MAIL FROM:, if seen
        self.__rcpttos = []          # recipients accumulated from RCPT TO:
        self.__data = ''
        self.__fqdn = socket.getfqdn()
        try:
            self.__peer = conn.getpeername()
        except socket.error, err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            if err[0] != errno.ENOTCONN:
                raise
            return
        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
        self.push('220 %s %s' % (self.__fqdn, __version__))
        self.set_terminator('\r\n')

    # Overrides base class for convenience
    def push(self, msg):
        """Queue |msg| for sending, appending the CRLF line terminator."""
        asynchat.async_chat.push(self, msg + '\r\n')

    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        """Buffer incoming bytes until the terminator is seen."""
        self.__line.append(data)

    # Implementation of base class abstract method
    def found_terminator(self):
        """Dispatch one completed unit: a command line in COMMAND state, or
        the whole message body in DATA state."""
        line = EMPTYSTRING.join(self.__line)
        print >> DEBUGSTREAM, 'Data:', repr(line)
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Commands dispatch to the matching smtp_<COMMAND> method.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            status = self.__server.process_message(self.__peer,
                                                   self.__mailfrom,
                                                   self.__rcpttos,
                                                   self.__data)
            # Message delivered to the server: reset envelope state so the
            # client can send another message on the same connection.
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator('\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)

    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        """Record the client's greeting; reject duplicates with 503."""
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)

    def smtp_NOOP(self, arg):
        """No-op; NOOP takes no argument."""
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')

    def smtp_QUIT(self, arg):
        """Say goodbye and close the channel once output is flushed."""
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()

    # factored
    def __getaddr(self, keyword, arg):
        """Extract the address following |keyword| (e.g. 'FROM:') in |arg|,
        stripping <> brackets; returns None when the keyword doesn't match."""
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <person@dom.com> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address

    def smtp_MAIL(self, arg):
        """Begin a mail transaction by recording the envelope sender."""
        print >> DEBUGSTREAM, '===> MAIL', arg
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
        self.push('250 Ok')

    def smtp_RCPT(self, arg):
        """Add one envelope recipient; requires a prior MAIL command."""
        print >> DEBUGSTREAM, '===> RCPT', arg
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
        self.push('250 Ok')

    def smtp_RSET(self, arg):
        """Abort the current mail transaction."""
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')

    def smtp_DATA(self, arg):
        """Switch to DATA state; the message ends at CRLF.CRLF."""
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        self.__state = self.DATA
        self.set_terminator('\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
    """Base async SMTP server; listens on |localaddr| and spawns an
    SMTPChannel per connection. Subclasses implement process_message()."""

    def __init__(self, localaddr, remoteaddr):
        """Bind and listen on |localaddr|; |remoteaddr| is stored for
        subclasses that relay onward (see PureProxy._deliver)."""
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            print >> DEBUGSTREAM, \
                '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr)

    def handle_accept(self):
        """Accept an incoming connection and hand it to an SMTPChannel."""
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
            channel = SMTPChannel(self, conn, addr)

    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Override this abstract method to handle messages from the client.
        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.
        mailfrom is the raw address the client claims the message is coming
        from.
        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.
        data is a string containing the entire full text of the message,
        headers (if supplied) and all. It has been `de-transparencied'
        according to RFC 821, Section 4.5.2. In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.
        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.
        """
        raise NotImplementedError
class DebuggingServer(SMTPServer):
    """SMTPServer that simply prints each received message to stdout."""

    # Do something with the gathered message
    def process_message(self, peer, mailfrom, rcpttos, data):
        inheaders = 1
        lines = data.split('\n')
        print '---------- MESSAGE FOLLOWS ----------'
        for line in lines:
            # headers first
            if inheaders and not line:
                # First blank line ends the header block: inject the
                # client's address as an X-Peer header before the body.
                print 'X-Peer:', peer[0]
                inheaders = 0
            print line
        print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
    """SMTPServer that relays every message to the configured remote host,
    first adding an X-Peer header identifying the original client."""

    def process_message(self, peer, mailfrom, rcpttos, data):
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        # Insert X-Peer just before the blank header/body separator.
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print >> DEBUGSTREAM, 'we got some refusals:', refused

    def _deliver(self, mailfrom, rcpttos, data):
        """Relay via smtplib; return {recipient: (code, message)} for every
        refused recipient (empty dict on full success)."""
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                # Always close the SMTP session, even if sendmail raised.
                s.quit()
        except smtplib.SMTPRecipientsRefused, e:
            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
            refused = e.recipients
        except (socket.error, smtplib.SMTPException), e:
            print >> DEBUGSTREAM, 'got', e.__class__
            # All recipients were refused.  If the exception had an associated
            # error code, use it.  Otherwise, fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
class MailmanProxy(PureProxy):
    """PureProxy variant that short-circuits mail addressed to local Mailman
    lists, enqueueing it directly instead of relaying it over SMTP."""

    def process_message(self, peer, mailfrom, rcpttos, data):
        from cStringIO import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left,
        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print >> DEBUGSTREAM, 'we got refusals:', refused
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.getheader('from'):
            msg['From'] = mailfrom
        if not msg.getheader('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print >> DEBUGSTREAM, 'sending message to', rcpt
            mlist = mlists.get(listname)
            if not mlist:
                # Open the list unlocked; mlists caches one MailList object
                # per list so each is opened at most once.
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
class Options:
    """Command-line options with their defaults (filled in by parseargs)."""
    # Drop privileges to 'nobody' after binding unless -n/--nosetuid.
    setuid = 1
    # Name of the SMTPServer subclass to instantiate (-c/--class).
    classname = 'PureProxy'
def parseargs():
    """Parse sys.argv into an Options instance.

    Recognizes -n/--nosetuid, -V/--version, -h/--help, -c/--class and
    -d/--debug, plus up to two positional host:port specs (local, then
    remote).  Any error exits through usage().
    """
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:d',
            ['class=', 'nosetuid', 'version', 'help', 'debug'])
    except getopt.error, e:
        usage(1, e)

    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print >> sys.stderr, __version__
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = 0
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            # -d routes the module's debug trace to stderr.
            DEBUGSTREAM = sys.stderr

    # parse the rest of the arguments
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))

    # split into host/port pairs
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
if __name__ == '__main__':
    options = parseargs()
    # Become nobody
    classname = options.classname
    if "." in classname:
        # Dotted name: import the containing module, then pull the class out.
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        # Bare name: look it up among the classes defined in this module.
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport))
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                  'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        # Drop root privileges now that the listening socket is bound.
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                  'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
| gpl-2.0 |
eusi/MissionPlanerHM | Lib/lib2to3/fixes/fix_urllib.py | 325 | 8385 | """Fix changes imports of urllib which are now incompatible.
This is rather similar to fix_imports, but because of the more
complex nature of the fixing for urllib, it has its own fixer.
"""
# Author: Nick Edds
# Local imports
from lib2to3.fixes.fix_imports import alternates, FixImports
from lib2to3 import fixer_base
from lib2to3.fixer_util import (Name, Comma, FromImport, Newline,
find_indentation, Node, syms)
# Maps each Python 2 module name to its Python 3 replacements: a list of
# (new_module, [member, ...]) pairs recording where each public member moved.
# NOTE: list order is load-bearing — MAPPING["urllib"][1] is indexed below.
MAPPING = {"urllib":  [
                ("urllib.request",
                    ["URLopener", "FancyURLopener", "urlretrieve",
                     "_urlopener", "urlopen", "urlcleanup",
                     "pathname2url", "url2pathname"]),
                ("urllib.parse",
                    ["quote", "quote_plus", "unquote", "unquote_plus",
                     "urlencode", "splitattr", "splithost", "splitnport",
                     "splitpasswd", "splitport", "splitquery", "splittag",
                     "splittype", "splituser", "splitvalue", ]),
                ("urllib.error",
                    ["ContentTooShortError"])],
           "urllib2" : [
                ("urllib.request",
                    ["urlopen", "install_opener", "build_opener",
                     "Request", "OpenerDirector", "BaseHandler",
                     "HTTPDefaultErrorHandler", "HTTPRedirectHandler",
                     "HTTPCookieProcessor", "ProxyHandler",
                     "HTTPPasswordMgr",
                     "HTTPPasswordMgrWithDefaultRealm",
                     "AbstractBasicAuthHandler",
                     "HTTPBasicAuthHandler", "ProxyBasicAuthHandler",
                     "AbstractDigestAuthHandler",
                     "HTTPDigestAuthHandler", "ProxyDigestAuthHandler",
                     "HTTPHandler", "HTTPSHandler", "FileHandler",
                     "FTPHandler", "CacheFTPHandler",
                     "UnknownHandler"]),
                ("urllib.error",
                    ["URLError", "HTTPError"]),
           ]
}

# Duplicate the url parsing functions for urllib2.
MAPPING["urllib2"].append(MAPPING["urllib"][1])
def build_pattern():
    """Yield lib2to3 match patterns covering every import form of the
    modules in MAPPING: plain ``import``, ``from ... import`` (with or
    without ``as``), star imports, ``import ... as`` renames, and bare
    attribute access such as ``urllib.urlopen``."""
    bare = set()
    for old_module, changes in MAPPING.items():
        for change in changes:
            new_module, members = change
            members = alternates(members)
            yield """import_name< 'import' (module=%r
                                  | dotted_as_names< any* module=%r any* >) >
                  """ % (old_module, old_module)
            yield """import_from< 'from' mod_member=%r 'import'
                       ( member=%s | import_as_name< member=%s 'as' any > |
                         import_as_names< members=any* >) >
                  """ % (old_module, members, members)
            yield """import_from< 'from' module_star=%r 'import' star='*' >
                  """ % old_module
            yield """import_name< 'import'
                                  dotted_as_name< module_as=%r 'as' any > >
                  """ % old_module
            # bare_with_attr has a special significance for FixImports.match().
            yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
                  """ % (old_module, members)
class FixUrllib(FixImports):
    """2to3 fixer rewriting urllib/urllib2 imports and attribute accesses
    to their Python 3 locations, driven by MAPPING."""

    def build_pattern(self):
        # Combine all per-module patterns into one alternation.
        return "|".join(build_pattern())

    def transform_import(self, node, results):
        """Transform for the basic import case. Replaces the old
           import name with a comma separated list of its
           replacements.
        """
        import_mod = results.get("module")
        pref = import_mod.prefix

        names = []

        # create a Node list of the replacement modules
        for name in MAPPING[import_mod.value][:-1]:
            names.extend([Name(name[0], prefix=pref), Comma()])
        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
        import_mod.replace(names)

    def transform_member(self, node, results):
        """Transform for imports of specific module elements. Replaces
           the module to be imported from with the appropriate new
           module.
        """
        mod_member = results.get("mod_member")
        pref = mod_member.prefix
        member = results.get("member")

        # Simple case with only a single member being imported
        if member:
            # this may be a list of length one, or just a node
            if isinstance(member, list):
                member = member[0]
            new_name = None
            # Find which new module this member moved to.
            for change in MAPPING[mod_member.value]:
                if member.value in change[1]:
                    new_name = change[0]
                    break
            if new_name:
                mod_member.replace(Name(new_name, prefix=pref))
            else:
                self.cannot_convert(node, "This is an invalid module element")

        # Multiple members being imported
        else:
            # a dictionary for replacements, order matters
            modules = []
            mod_dict = {}
            members = results["members"]
            for member in members:
                # we only care about the actual members
                if member.type == syms.import_as_name:
                    as_name = member.children[2].value
                    member_name = member.children[0].value
                else:
                    member_name = member.value
                    as_name = None
                # Skip the comma tokens between imported names.
                if member_name != u",":
                    for change in MAPPING[mod_member.value]:
                        if member_name in change[1]:
                            if change[0] not in mod_dict:
                                modules.append(change[0])
                            mod_dict.setdefault(change[0], []).append(member)

            new_nodes = []
            indentation = find_indentation(node)
            first = True
            def handle_name(name, prefix):
                # Preserve "x as y" clauses; otherwise emit a plain Name.
                if name.type == syms.import_as_name:
                    kids = [Name(name.children[0].value, prefix=prefix),
                            name.children[1].clone(),
                            name.children[2].clone()]
                    return [Node(syms.import_as_name, kids)]
                return [Name(name.value, prefix=prefix)]
            # Emit one "from <new_module> import ..." per target module,
            # keeping the original discovery order.
            for module in modules:
                elts = mod_dict[module]
                names = []
                for elt in elts[:-1]:
                    names.extend(handle_name(elt, pref))
                    names.append(Comma())
                names.extend(handle_name(elts[-1], pref))
                new = FromImport(module, names)
                if not first or node.parent.prefix.endswith(indentation):
                    new.prefix = indentation
                new_nodes.append(new)
                first = False
            if new_nodes:
                nodes = []
                # Join the generated imports with newlines.
                for new_node in new_nodes[:-1]:
                    nodes.extend([new_node, Newline()])
                nodes.append(new_nodes[-1])
                node.replace(nodes)
            else:
                self.cannot_convert(node, "All module elements are invalid")

    def transform_dot(self, node, results):
        """Transform for calls to module members in code."""
        module_dot = results.get("bare_with_attr")
        member = results.get("member")
        new_name = None
        if isinstance(member, list):
            member = member[0]
        for change in MAPPING[module_dot.value]:
            if member.value in change[1]:
                new_name = change[0]
                break
        if new_name:
            # Only the module part is rewritten; the attribute stays.
            module_dot.replace(Name(new_name,
                                    prefix=module_dot.prefix))
        else:
            self.cannot_convert(node, "This is an invalid module element")

    def transform(self, node, results):
        # Dispatch on which named sub-pattern matched.
        if results.get("module"):
            self.transform_import(node, results)
        elif results.get("mod_member"):
            self.transform_member(node, results)
        elif results.get("bare_with_attr"):
            self.transform_dot(node, results)
        # Renaming and star imports are not supported for these modules.
        elif results.get("module_star"):
            self.cannot_convert(node, "Cannot handle star imports.")
        elif results.get("module_as"):
            self.cannot_convert(node, "This module is now multiple modules")
| gpl-3.0 |
vedujoshi/os_tempest | tempest/api/object_storage/test_account_services_negative.py | 4 | 1981 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Joe H. Rahme <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest import clients
from tempest import exceptions
from tempest import test
class AccountNegativeTest(base.BaseObjectTest):

    @test.attr(type=['negative', 'gate'])
    def test_list_containers_with_non_authorized_user(self):
        """Listing account containers with a non-authorized user's token
        must be rejected with 401 Unauthorized."""
        # Provision a fresh (non-authorized) test user.
        self.data.setup_test_user()
        manager = clients.Manager(self.data.test_credentials)
        provider = manager.auth_provider
        # Prime the provider's cached auth for the test user ...
        provider.auth_data
        # ... then drop the cache so the next access fetches fresh
        # credentials we can hand to the custom account client.
        delattr(provider, 'auth_data')
        fresh_auth = provider.auth_data
        self.custom_account_client.auth_provider.set_alt_auth_data(
            request_part='headers', auth_data=fresh_auth)
        # Listing the account's containers with that token must fail.
        self.assertRaises(exceptions.Unauthorized,
                          self.custom_account_client.list_account_containers,
                          params={'format': 'json'})
        # Remove the user created above.
        self.data.teardown_all()
| apache-2.0 |
yangw1234/BigDL | pyspark/test/bigdl/test_simple_integration.py | 2 | 28580 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.nn.layer import *
from bigdl.nn.initialization_method import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
from bigdl.util.common import _py2java
from bigdl.nn.initialization_method import *
from bigdl.dataset import movielens
import numpy as np
import tempfile
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from bigdl.util.engine import compare_version
from bigdl.transform.vision.image import *
from bigdl.models.utils.model_broadcast import broadcast_model
from bigdl.dataset.dataset import *
np.random.seed(1337) # for reproducibility
class TestSimple():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
sparkConf = create_spark_conf().setMaster("local[4]").setAppName("test model")
self.sc = get_spark_context(sparkConf)
init_engine()
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.sc.stop()
def test_training(self):
cadd = CAdd([5, 1])
y = np.ones([5, 4])
bf = np.ones([5, 4])
for i in range(y.shape[0]):
bf[i] = i + 1
def grad_update(mlp, x, y, criterion, learning_rate):
pred = mlp.forward(x)
err = criterion.forward(pred, y)
grad_criterion = criterion.backward(pred, y)
mlp.zero_grad_parameters()
mlp.backward(x, grad_criterion)
mlp.update_parameters(learning_rate)
return err
mse = MSECriterion(self)
for i in range(0, 1000):
x = np.random.random((5, 4))
y = x.copy()
y = y + bf
err = grad_update(cadd, x, y, mse, 0.01)
print(cadd.get_weights()[0])
assert_allclose(cadd.get_weights()[0],
np.array([1, 2, 3, 4, 5]).reshape((5, 1)),
rtol=1.e-1)
def test_load_keras_model_of(self):
from bigdl.nn.keras.topology import Model as KModel
from bigdl.nn.keras.layer import Input as KInput
from bigdl.nn.keras.layer import Dense
input = KInput(shape=[2, 3])
fc1 = Dense(2)(input)
model = KModel(input, fc1)
tmp_path = tempfile.mktemp()
model.save(tmp_path, True)
model_loaded = KModel.load(tmp_path)
assert "bigdl.nn.keras.topology.Model" in str(type(model_loaded))
assert len(model_loaded.layers) == 2
def test_load_keras_seq_of(self):
from bigdl.nn.keras.topology import Sequential as KSequential
from bigdl.nn.keras.layer import Dense
model = KSequential()
fc1 = Dense(2, input_shape=[2, 3])
model.add(fc1)
tmp_path = tempfile.mktemp()
model.save(tmp_path, True)
model_loaded = KSequential.load(tmp_path)
assert "bigdl.nn.keras.topology.Sequential" in str(type(model_loaded))
assert len(model_loaded.layers) == 1
def test_load_model_of(self):
input = Input()
fc1 = Linear(4, 2)(input)
model = Model(input, fc1)
tmp_path = tempfile.mktemp()
model.save(tmp_path, True)
model_loaded = Model.load(tmp_path)
assert "Model" in str(type(model_loaded))
assert len(model_loaded.layers) == 2
def test_load_sequential_of(self):
fc1 = Linear(4, 2)
model = Sequential()
model.add(fc1)
tmp_path = tempfile.mktemp()
model.save(tmp_path, True)
model_loaded = Model.load(tmp_path)
assert "Sequential" in str(type(model_loaded))
assert len(model_loaded.layers) == 1
def test_load_model(self):
fc1 = Linear(4, 2)
fc1.set_weights([np.ones((4, 2)), np.ones((2,))])
tmp_path = tempfile.mktemp()
fc1.save(tmp_path, True)
fc1_loaded = Model.load(tmp_path)
assert_allclose(fc1_loaded.get_weights()[0],
fc1.get_weights()[0])
def test_load_model_proto(self):
fc1 = Linear(4, 2)
fc1.set_weights([np.ones((4, 2)), np.ones((2,))])
tmp_path = tempfile.mktemp()
fc1.saveModel(tmp_path, None, True)
fc1_loaded = Model.loadModel(tmp_path)
assert_allclose(fc1_loaded.get_weights()[0],
fc1.get_weights()[0])
    def test_load_optim_method(self):
        """SGD saved to disk and reloaded via OptimMethod.load must keep its
        hyper-parameters and still be able to drive an Optimizer."""
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            # Random linear-regression sample: label = sum(2 * x) + 0.4
            features = np.random.uniform(0, 1, (FEATURES_DIM))
            label = (2 * features).sum() + 0.4
            return Sample.from_ndarray(features, label)
        trainingData = self.sc.parallelize(range(0, data_len)).map(lambda i: gen_rand_sample())
        model = Sequential()
        l1 = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()).set_name("linear1")
        model.add(l1)
        # NOTE(review): "leaningrate_schedule" appears to be the (misspelled)
        # keyword the SGD API actually exposes — confirm against bigdl.optim.
        sgd = SGD(learningrate=0.01, learningrate_decay=0.0002, weightdecay=0.0,
                  momentum=0.0, dampening=0.0, nesterov=False,
                  leaningrate_schedule=Poly(0.5, int((data_len / batch_size) * epoch_num)))
        tmp_path = tempfile.mktemp()
        sgd.save(tmp_path, True)
        optim_method = OptimMethod.load(tmp_path)
        # The reloaded optim method must expose the same hyper-parameters.
        assert optim_method.learningRate() == sgd.value.learningRate()
        assert optim_method.momentum() == sgd.value.momentum()
        assert optim_method.nesterov() == sgd.value.nesterov()

        # And it must still be usable for an actual optimization run.
        optimizer = Optimizer(
            model=model,
            training_rdd=trainingData,
            criterion=MSECriterion(),
            optim_method=optim_method,
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)
        optimizer.optimize()
def test_create_node(self):
import numpy as np
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
cadd = CAddTable()([fc1, fc2])
output1 = ReLU()(cadd)
model = Model([fc1, fc2], [output1])
fc1.element().set_weights([np.ones((4, 2)), np.ones((2,))])
fc2.element().set_weights([np.ones((4, 2)), np.ones((2,))])
output = model.forward([np.array([0.1, 0.2, -0.3, -0.4]),
np.array([0.5, 0.4, -0.2, -0.1])])
assert_allclose(output,
np.array([2.2, 2.2]))
def test_graph_backward(self):
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
cadd = CAddTable()([fc1, fc2])
output1 = ReLU()(cadd)
output2 = Threshold(10.0)(cadd)
model = Model([fc1, fc2], [output1, output2])
fc1.element().set_weights([np.ones((4, 2)), np.ones((2,))])
fc2.element().set_weights([np.ones((4, 2)) * 2, np.ones((2,)) * 2])
output = model.forward([np.array([0.1, 0.2, -0.3, -0.4]),
np.array([0.5, 0.4, -0.2, -0.1])])
gradInput = model.backward([np.array([0.1, 0.2, -0.3, -0.4]),
np.array([0.5, 0.4, -0.2, -0.1])],
[np.array([1.0, 2.0]),
np.array([3.0, 4.0])])
assert_allclose(gradInput[0],
np.array([3.0, 3.0, 3.0, 3.0]))
assert_allclose(gradInput[1],
np.array([6.0, 6.0, 6.0, 6.0]))
def test_get_node(self):
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
fc1.element().set_name("fc1")
cadd = CAddTable()([fc1, fc2])
output1 = ReLU()(cadd)
model = Model([fc1, fc2], [output1])
res = model.node("fc1")
assert res.element().name() == "fc1"
def test_save_graph_topology(self):
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
cadd = CAddTable()([fc1, fc2])
output1 = ReLU()(cadd)
output2 = Threshold(10.0)(cadd)
model = Model([fc1, fc2], [output1, output2])
model.save_graph_topology(tempfile.mkdtemp())
def test_graph_preprocessor(self):
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
cadd = CAddTable()([fc1, fc2])
preprocessor = Model([fc1, fc2], [cadd])
relu = ReLU()()
fc3 = Linear(2, 1)(relu)
trainable = Model([relu], [fc3])
model = Model(preprocessor, trainable)
model.forward([np.array([0.1, 0.2, -0.3, -0.4]), np.array([0.5, 0.4, -0.2, -0.1])])
model.backward([np.array([0.1, 0.2, -0.3, -0.4]), np.array([0.5, 0.4, -0.2, -0.1])],
np.array([1.0]))
def test_load_zip_conf(self):
from bigdl.util.common import get_bigdl_conf
import sys
sys_path_back = sys.path
sys.path = [path for path in sys.path if "spark-bigdl.conf" not in path]
sys.path.insert(0, os.path.join(os.path.split(__file__)[0],
"resources/conf/python-api.zip")) # noqa
sys.path.insert(0, os.path.join(os.path.split(__file__)[0],
"resources/conf/invalid-python-api.zip")) # noqa
result = get_bigdl_conf()
assert result.get("spark.executorEnv.OMP_WAIT_POLICY"), "passive"
sys.path = sys_path_back
def test_set_seed(self):
w_init = Xavier()
b_init = Zeros()
l1 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear1").set_seed(
1234).reset() # noqa
l2 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear2").set_seed(
1234).reset() # noqa
p1 = l1.parameters()
p2 = l2.parameters()
assert (p1["linear1"]["weight"] == p2["linear2"]["weight"]).all() # noqa
    def test_simple_flow(self):
        """End-to-end Optimizer flow: train, swap model, checkpoint,
        summaries, extended end trigger, predict and evaluate."""
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            # Random linear-regression sample: label = sum(2 * x) + 0.4
            features = np.random.uniform(0, 1, (FEATURES_DIM))
            label = np.array((2 * features).sum() + 0.4)
            return Sample.from_ndarray(features, label)
        trainingData = self.sc.parallelize(range(0, data_len)).map(
            lambda i: gen_rand_sample())

        model_test = Sequential()
        l1_test = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()) \
            .set_name("linear1_test")
        assert "linear1_test" == l1_test.name()
        model_test.add(l1_test)
        model_test.add(Sigmoid())

        model = Sequential()
        l1 = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()).set_name("linear1")
        assert "linear1" == l1.name()
        model.add(l1)

        optim_method = SGD(learningrate=0.01, learningrate_decay=0.0002, weightdecay=0.0,
                           momentum=0.0, dampening=0.0, nesterov=False,
                           leaningrate_schedule=Poly(0.5, int((data_len / batch_size) * epoch_num)))
        optimizer = Optimizer.create(
            model=model_test,
            training_set=trainingData,
            criterion=MSECriterion(),
            optim_method=optim_method,
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)
        # Validate on the training set after every epoch.
        optimizer.set_validation(
            batch_size=batch_size,
            val_rdd=trainingData,
            trigger=EveryEpoch(),
            val_method=[Top1Accuracy()]
        )

        optimizer.optimize()

        # Swap in a different model and re-run with checkpoints + summaries.
        optimizer.set_model(model=model)
        tmp_dir = tempfile.mkdtemp()
        optimizer.set_checkpoint(SeveralIteration(1), tmp_dir)
        train_summary = TrainSummary(log_dir=tmp_dir,
                                     app_name="run1")
        train_summary.set_summary_trigger("LearningRate", SeveralIteration(1))
        val_summary = ValidationSummary(log_dir=tmp_dir,
                                        app_name="run1")
        optimizer.set_train_summary(train_summary)
        optimizer.set_val_summary(val_summary)
        # Extend the stop condition beyond the first run's MaxEpoch.
        optimizer.set_end_when(MaxEpoch(epoch_num * 2))

        trained_model = optimizer.optimize()

        lr_result = train_summary.read_scalar("LearningRate")
        top1_result = val_summary.read_scalar("Top1Accuracy")

        # TODO: add result validation
        parameters = trained_model.parameters()

        assert parameters["linear1"] is not None
        print("parameters %s" % parameters["linear1"])
        predict_result = trained_model.predict(trainingData)
        p = predict_result.take(2)
        print("predict predict: \n")
        for i in p:
            print(str(i) + "\n")
        print(len(p))

        test_results = trained_model.evaluate(trainingData, 32, [Top1Accuracy()])
        for test_result in test_results:
            print(test_result)
    def test_multiple_input(self):
        """
        Test training data of samples with several tensors as feature
        using a sequential model with multiple inputs.
        """
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            # Each sample carries TWO feature tensors (a table input).
            features1 = np.random.uniform(0, 1, (FEATURES_DIM))
            features2 = np.random.uniform(0, 1, (FEATURES_DIM))
            label = np.array((2 * (features1 + features2)).sum() + 0.4)
            return Sample.from_ndarray([features1, features2], label)
        trainingData = self.sc.parallelize(range(0, data_len)).map(
            lambda i: gen_rand_sample())

        # Two Linear+ReLU branches, one per input tensor, summed at the end.
        model_test = Sequential()
        branches = ParallelTable()
        branch1 = Sequential().add(Linear(FEATURES_DIM, 1)).add(ReLU())
        branch2 = Sequential().add(Linear(FEATURES_DIM, 1)).add(ReLU())
        branches.add(branch1).add(branch2)
        model_test.add(branches).add(CAddTable())

        optim_method = SGD(learningrate=0.01, learningrate_decay=0.0002, weightdecay=0.0,
                           momentum=0.0, dampening=0.0, nesterov=False,
                           leaningrate_schedule=Poly(0.5, int((data_len / batch_size) * epoch_num)))
        optimizer = Optimizer.create(
            model=model_test,
            training_set=trainingData,
            criterion=MSECriterion(),
            optim_method=optim_method,
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)
        optimizer.set_validation(
            batch_size=batch_size,
            val_rdd=trainingData,
            trigger=EveryEpoch(),
            val_method=[Top1Accuracy()]
        )

        optimizer.optimize()
    def test_table_label(self):
        """
        Test for table as label in Sample.
        For test purpose only.
        """
        def gen_rand_sample():
            # Both the feature and the label are tables (lists of tensors).
            features1 = np.random.uniform(0, 1, 3)
            features2 = np.random.uniform(0, 1, 3)
            label = np.array((2 * (features1 + features2)).sum() + 0.4)
            return Sample.from_ndarray([features1, features2], [label, label])
        training_data = self.sc.parallelize(range(0, 50)).map(
            lambda i: gen_rand_sample())

        # Two branches producing a table output that matches the table label.
        model_test = Sequential()
        branches = ParallelTable()
        branch1 = Sequential().add(Linear(3, 1)).add(Tanh())
        branch2 = Sequential().add(Linear(3, 1)).add(ReLU())
        branches.add(branch1).add(branch2)
        model_test.add(branches)

        optimizer = Optimizer.create(
            model=model_test,
            training_set=training_data,
            criterion=MarginRankingCriterion(),
            optim_method=SGD(),
            end_trigger=MaxEpoch(5),
            batch_size=32)
        optimizer.optimize()
    def test_forward_backward(self):
        """Forward/backward through a single Linear layer with a fixed RNG
        seed; the expected output values are tied to seed 100 and the exact
        order of the uniform() calls below."""
        from bigdl.nn.layer import Linear
        rng = RNG()
        rng.set_seed(100)

        linear = Linear(4, 5)
        input = rng.uniform(0.0, 1.0, [4])
        output = linear.forward(input)
        assert_allclose(output,
                        np.array([0.41366524,
                                  0.009532653,
                                  -0.677581,
                                  0.07945433,
                                  -0.5742568]),
                        atol=1e-6, rtol=0)
        mse = MSECriterion()
        target = rng.uniform(0.0, 1.0, [5])
        loss = mse.forward(output, target)
        print("loss: " + str(loss))
        grad_output = mse.backward(output, rng.uniform(0.0, 1.0, [5]))
        l_grad_output = linear.backward(input, grad_output)
def test_forward_multiple(self):
from bigdl.nn.layer import Linear
rng = RNG()
rng.set_seed(100)
input = [rng.uniform(0.0, 0.1, [2]),
rng.uniform(0.0, 0.1, [2]) + 0.2]
grad_output = [rng.uniform(0.0, 0.1, [3]),
rng.uniform(0.0, 0.1, [3]) + 0.2]
linear1 = Linear(2, 3)
linear2 = Linear(2, 3)
module = ParallelTable()
module.add(linear1)
module.add(linear2)
module.forward(input)
module.backward(input, grad_output)
    def test_init_method(self):
        """Every (layer, weight-init, bias-init) combination must be accepted
        by set_init_method; layers with shape constraints are paired with the
        narrower "special" initializer set."""
        # Generic initializers accepted by any parameterized layer.
        initializers = [
            Zeros(),
            Ones(),
            ConstInitMethod(5),
            RandomUniform(-1, 1),
            RandomNormal(0, 1),
            None
        ]
        # Initializers that require fan-in/fan-out information.
        special_initializers = [
            MsraFiller(False),
            Xavier(),
            RandomUniform(),
        ]

        layers = [
            SpatialConvolution(6, 12, 5, 5),
            SpatialShareConvolution(1, 1, 1, 1),
            LookupTable(1, 1, 1e-5, 1e-5, 1e-5, True),
            Bilinear(1, 1, 1, True),
            Cosine(2, 3),
            SpatialFullConvolution(1, 1, 1, 1),
            Add(1),
            Linear(100, 10),
            CMul([1, 2]),
            Mul(),
            PReLU(1),
            Euclidean(1, 1, True),
            SpatialDilatedConvolution(1, 1, 1, 1),
            SpatialBatchNormalization(1),
            BatchNormalization(1, 1e-5, 1e-5, True),
        ]

        # Subset of layers compatible with the fan-based initializers
        # (LookupTable and Bilinear are excluded).
        special_layers = [
            SpatialConvolution(6, 12, 5, 5),
            SpatialShareConvolution(1, 1, 1, 1),
            Cosine(2, 3),
            SpatialFullConvolution(1, 1, 1, 1),
            Add(1),
            Linear(100, 10),
            CMul([1, 2]),
            Mul(),
            PReLU(1),
            Euclidean(1, 1, True),
            SpatialDilatedConvolution(1, 1, 1, 1),
            SpatialBatchNormalization(1),
            BatchNormalization(1, 1e-5, 1e-5, True),
        ]
        for layer in layers:
            for init1 in initializers:
                for init2 in initializers:
                    layer.set_init_method(init1, init2)
        for layer in special_layers:
            for init1 in special_initializers:
                for init2 in special_initializers:
                    layer.set_init_method(init1, init2)

        # BilinearFiller is only valid for SpatialFullConvolution weights.
        SpatialFullConvolution(1, 1, 1, 1).set_init_method(BilinearFiller(), Zeros())
    def test_predict(self):
        """predict / predict(batch_size=...) / predict_class on an RDD must
        reproduce the ground-truth outputs for seed 1234 initialization."""
        np.random.seed(100)
        total_length = 6
        features = np.random.uniform(0, 1, (total_length, 2))
        label = (features).sum() + 0.4
        predict_data = self.sc.parallelize(range(0, total_length)).map(
            lambda i: Sample.from_ndarray(features[i], label))
        model = Linear(2, 1).set_init_method(Xavier(), Zeros()) \
            .set_name("linear1").set_seed(1234).reset()
        predict_result = model.predict(predict_data)
        p = predict_result.take(6)
        # Ground truth recorded for numpy seed 100 + layer seed 1234.
        ground_label = np.array([[-0.47596836], [-0.37598032], [-0.00492062],
                                 [-0.5906958], [-0.12307882], [-0.77907401]], dtype="float32")
        for i in range(0, total_length):
            assert_allclose(p[i], ground_label[i], atol=1e-6, rtol=0)

        # Batched prediction must yield exactly the same values.
        predict_result_with_batch = model.predict(features=predict_data,
                                                  batch_size=4)
        p_with_batch = predict_result_with_batch.take(6)
        for i in range(0, total_length):
            assert_allclose(p_with_batch[i], ground_label[i], atol=1e-6, rtol=0)

        # Single-output model: predict_class always yields class 1.
        predict_class = model.predict_class(predict_data)
        predict_labels = predict_class.take(6)
        for i in range(0, total_length):
            assert predict_labels[i] == 1
def test_predict_image(self):
resource_path = os.path.join(os.path.split(__file__)[0], "resources")
image_path = os.path.join(resource_path, "pascal/000025.jpg")
image_frame = ImageFrame.read(image_path, self.sc)
transformer = Pipeline([Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
image_frame.transform(transformer)
model = Sequential()
model.add(SpatialConvolution(3, 6, 5, 5))
model.add(Tanh())
image_frame = model.predict_image(image_frame)
predicts = image_frame.get_predict()
predicts.collect()
def test_predict_image_local(self):
resource_path = os.path.join(os.path.split(__file__)[0], "resources")
image_path = os.path.join(resource_path, "pascal/000025.jpg")
image_frame = ImageFrame.read(image_path)
transformer = Pipeline([Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
image_frame.transform(transformer)
model = Sequential()
model.add(SpatialConvolution(3, 6, 5, 5))
model.add(Tanh())
image_frame = model.predict_image(image_frame)
predicts = image_frame.get_predict()
def test_rng(self):
    """Uniform sampling from RNG must be reproducible under a fixed seed."""
    generator = RNG()
    generator.set_seed(100)
    sample = generator.uniform(0.1, 0.2, [2, 3])
    expected = np.array([[0.15434049, 0.16711557, 0.12783694],
                         [0.14120464, 0.14245176, 0.15263824]])
    assert sample.shape == (2, 3)
    data = sample
    # Each row must match the known draw for seed 100.
    for row, expected_row in zip(data, expected):
        assert_allclose(row, expected_row, atol=1e-6, rtol=0)
    # Re-seeding with the same value must reproduce the identical draw.
    generator.set_seed(100)
    data2 = generator.uniform(0.1, 0.2, [2, 3])
    for row, row2 in zip(data, data2):
        assert_allclose(row, row2)
def test_save_jtensor_dict(self):
    # Regression test: converting a dict of JTensors to the JVM side via
    # _py2java must succeed without raising.
    tensors = {}
    tensors["tensor1"] = JTensor.from_ndarray(np.random.rand(3, 2))
    tensors["tensor2"] = JTensor.from_ndarray(np.random.rand(3, 2))
    # in old impl, this will throw an exception
    _py2java(self.sc._gateway, tensors)
def test_compare_version(self):
    """compare_version acts as a three-way comparator over version strings."""
    cases = [
        ("2.1.1", "2.2.0", -1),
        ("2.2.0", "1.6.2", 1),
        ("2.2.0", "2.2.0", 0),
        ("1.6.0", "2.1.0", -1),
        ("2.1.0", "2.1.1", -1),
        ("2.0.1", "1.5.2", 1),
    ]
    for left, right, expected in cases:
        assert compare_version(left, right) == expected
def test_local_optimizer_predict(self):
    # Train a one-layer linear model on y = 2*x1 + 2*x2 + 0.4 with a local
    # optimizer, then check the learned weights and local predictions.
    feature_num = 2
    data_len = 1000
    batch_size = 32
    epoch_num = 500
    X_ = np.random.uniform(0, 1, (data_len, feature_num))
    y_ = (2 * X_).sum(1) + 0.4
    model = Sequential()
    l1 = Linear(feature_num, 1)
    model.add(l1)
    localOptimizer = Optimizer.create(
        model=model,
        training_set=(X_, y_),
        criterion=MSECriterion(),
        optim_method=SGD(learningrate=1e-2),
        end_trigger=MaxEpoch(epoch_num),
        batch_size=batch_size)
    trained_model = localOptimizer.optimize()
    # NOTE(review): the optimize() return value is immediately overwritten;
    # this relies on optimize() updating `model` in place — confirm intended.
    trained_model = model
    w = trained_model.get_weights()
    # Learned weights should approximate [2, 2] and bias 0.4.
    assert_allclose(w[0], np.array([2, 2]).reshape([1, 2]), rtol=1e-1)
    assert_allclose(w[1], np.array([0.4]), rtol=1e-1)
    predict_result = trained_model.predict_local(X_)
    assert_allclose(y_, predict_result.reshape((data_len,)), rtol=1e-1)
def test_local_predict_class(self):
    # With the fixed seed, predict_class maps all three random samples to
    # class 1 after a linear+sigmoid model.
    feature_num = 2
    data_len = 3
    X_ = np.random.uniform(-1, 1, (data_len, feature_num))
    model = Sequential()
    l1 = Linear(feature_num, 1)
    model.add(l1)
    model.add(Sigmoid())
    # Seed fixes the initial weights so the predicted class is deterministic.
    model.set_seed(1234).reset()
    predict_result = model.predict_class(X_)
    assert_array_equal(predict_result, np.ones([3]))
def test_local_predict_multiple_input(self):
    # Graph model with two inputs joined along dimension 1: local predict
    # must accept lists of ndarrays or JTensors, with or without batch_size.
    l1 = Linear(3, 2)()
    l2 = Linear(3, 3)()
    joinTable = JoinTable(dimension=1, n_input_dims=1)([l1, l2])
    model = Model(inputs=[l1, l2], outputs=joinTable)
    # ndarray inputs: joined output is (4, 2 + 3).
    result = model.predict_local([np.ones([4, 3]), np.ones([4, 3])])
    assert result.shape == (4, 5)
    result2 = model.predict_class([np.ones([4, 3]), np.ones([4, 3])])
    assert result2.shape == (4,)
    # JTensor inputs must behave identically.
    result3 = model.predict_local([JTensor.from_ndarray(np.ones([4, 3])),
                                   JTensor.from_ndarray(np.ones([4, 3]))])
    assert result3.shape == (4, 5)
    result4 = model.predict_class([JTensor.from_ndarray(np.ones([4, 3])),
                                   JTensor.from_ndarray(np.ones([4, 3]))])
    assert result4.shape == (4,)
    # Explicit batch_size must not change the output shape.
    result5 = model.predict_local([JTensor.from_ndarray(np.ones([4, 3])),
                                   JTensor.from_ndarray(np.ones([4, 3]))], batch_size=2)
    assert result5.shape == (4, 5)
def test_model_broadcast(self):
    # Broadcasting a model to executors must preserve forward() results:
    # running forward on the broadcast copy equals running it locally.
    init_executor_gateway(self.sc)
    model = Linear(3, 2)
    broadcasted = broadcast_model(self.sc, model)
    input_data = np.random.rand(3)
    output = self.sc.parallelize([input_data], 1)\
        .map(lambda x: broadcasted.value.forward(x)).first()
    expected = model.forward(input_data)
    assert_allclose(output, expected)
def test_train_DataSet(self):
    # End-to-end training over an image-frame DataSet with per-epoch Top-1
    # validation, followed by predict_image over the same 8 images.
    batch_size = 8
    epoch_num = 5
    images = []
    labels = []
    # Build 8 random 200x200x3 images, all labeled class 2.
    for i in range(0, 8):
        features = np.random.uniform(0, 1, (200, 200, 3))
        label = np.array([2])
        images.append(features)
        labels.append(label)
    image_frame = DistributedImageFrame(self.sc.parallelize(images),
                                        self.sc.parallelize(labels))
    transformer = Pipeline([BytesToMat(), Resize(256, 256), CenterCrop(224, 224),
                            ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
                            MatToTensor(), ImageFrameToSample(target_keys=['label'])])
    data_set = DataSet.image_frame(image_frame).transform(transformer)
    # Tiny conv -> flatten -> linear -> log-softmax classifier (20 classes).
    model = Sequential()
    model.add(SpatialConvolution(3, 1, 5, 5))
    model.add(View([1 * 220 * 220]))
    model.add(Linear(1 * 220 * 220, 20))
    model.add(LogSoftMax())
    optim_method = SGD(learningrate=0.01)
    optimizer = Optimizer.create(
        model=model,
        training_set=data_set,
        criterion=ClassNLLCriterion(),
        optim_method=optim_method,
        end_trigger=MaxEpoch(epoch_num),
        batch_size=batch_size)
    # Validate on the training set itself after every epoch.
    optimizer.set_validation(
        batch_size=batch_size,
        val_rdd=data_set,
        trigger=EveryEpoch(),
        val_method=[Top1Accuracy()]
    )
    trained_model = optimizer.optimize()
    predict_result = trained_model.predict_image(image_frame.transform(transformer))
    # One prediction per input image.
    assert_allclose(predict_result.get_predict().count(), 8)
def test_get_node_and_core_num(self):
    """The local test context must report 1 node with 4 cores."""
    node_count, core_count = get_node_and_core_number()
    assert (node_count, core_count) == (1, 4)
def test_read_image_frame(self):
    # Bug fix: this method was named "tes_read_image_frame", so pytest's
    # "test_*" collection never discovered or ran it. Renamed so the test
    # actually executes; the body is unchanged.
    """Reading a single JPEG into a distributed ImageFrame yields one image."""
    init_engine()
    resource_path = os.path.join(os.path.split(__file__)[0], "resources")
    image_path = os.path.join(resource_path, "pascal/000025.jpg")
    image_frame = ImageFrame.read(image_path, self.sc)
    count = image_frame.get_image().count()
    assert count == 1
if __name__ == "__main__":
    # Allow running this test module directly: delegate to pytest.
    pytest.main([__file__])
| apache-2.0 |
jallohm/django | tests/sites_tests/tests.py | 251 | 10760 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
    # Exercises the Site model, get_current_site() resolution, and the
    # module-level SITE_CACHE.
    multi_db = True

    def setUp(self):
        # Create the Site row matching settings.SITE_ID that the tests rely on.
        self.site = Site(
            id=settings.SITE_ID,
            domain="example.com",
            name="example.com",
        )
        self.site.save()

    def tearDown(self):
        # Reset the SITE_CACHE so tests stay independent.
        Site.objects.clear_cache()

    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertIsInstance(s, Site)
        s.delete()
        self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)

    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual("example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual("Example site", site.name)

    def test_delete_all_sites_clears_cache(self):
        # When all site objects are deleted the cache should also
        # be cleared and get_current() should raise a DoesNotExist.
        self.assertIsInstance(Site.objects.get_current(), Site)
        Site.objects.all().delete()
        self.assertRaises(Site.DoesNotExist, Site.objects.get_current)

    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertIsInstance(site, Site)
        self.assertEqual(site.id, settings.SITE_ID)

        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)

        # A RequestSite is returned if the sites framework is not installed
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            site = get_current_site(request)
            self.assertIsInstance(site, RequestSite)
            self.assertEqual(site.name, "example.com")

    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_no_site_id(self):
        # Without a SITE_ID, the site is looked up from the request's host.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        del settings.SITE_ID
        site = get_current_site(request)
        self.assertEqual(site.name, "example.com")

    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com', 'example.net'])
    def test_get_current_site_no_site_id_and_handle_port_fallback(self):
        # Host:port lookups prefer an exact "domain:port" match and fall
        # back to the bare domain when the ported form does not exist.
        request = HttpRequest()
        s1 = self.site
        s2 = Site.objects.create(domain='example.com:80', name='example.com:80')

        # Host header without port
        request.META = {'HTTP_HOST': 'example.com'}
        site = get_current_site(request)
        self.assertEqual(site, s1)

        # Host header with port - match, no fallback without port
        request.META = {'HTTP_HOST': 'example.com:80'}
        site = get_current_site(request)
        self.assertEqual(site, s2)

        # Host header with port - no match, fallback without port
        request.META = {'HTTP_HOST': 'example.com:81'}
        site = get_current_site(request)
        self.assertEqual(site, s1)

        # Host header with non-matching domain
        request.META = {'HTTP_HOST': 'example.net'}
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)

        # Ensure domain for RequestSite always matches host header
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            request.META = {'HTTP_HOST': 'example.com'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com')

            request.META = {'HTTP_HOST': 'example.com:80'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com:80')

    def test_domain_name_with_whitespaces(self):
        # Regression for #17320
        # Domain names are not allowed to contain whitespace characters.
        site = Site(name="test name", domain="test test")
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ttest"
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ntest"
        self.assertRaises(ValidationError, site.full_clean)

    def test_clear_site_cache(self):
        # The cache is keyed by SITE_ID and, when SITE_ID is unset, also by
        # domain; clear_site_cache() must drop both entries.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        self.assertEqual(models.SITE_CACHE, {})
        get_current_site(request)
        expected_cache = {self.site.id: self.site}
        self.assertEqual(models.SITE_CACHE, expected_cache)

        with self.settings(SITE_ID=''):
            get_current_site(request)

        expected_cache.update({self.site.domain: self.site})
        self.assertEqual(models.SITE_CACHE, expected_cache)

        clear_site_cache(Site, instance=self.site, using='default')
        self.assertEqual(models.SITE_CACHE, {})

    @override_settings(SITE_ID='')
    def test_clear_site_cache_domain(self):
        site = Site.objects.create(name='example2.com', domain='example2.com')
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example2.com",
            "SERVER_PORT": "80",
        }
        get_current_site(request)  # prime the models.SITE_CACHE
        expected_cache = {site.domain: site}
        self.assertEqual(models.SITE_CACHE, expected_cache)

        # Site exists in 'default' database so using='other' shouldn't clear.
        clear_site_cache(Site, instance=site, using='other')
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # using='default' should clear.
        clear_site_cache(Site, instance=site, using='default')
        self.assertEqual(models.SITE_CACHE, {})

    def test_unique_domain(self):
        # A second Site with the same domain must fail unique validation.
        site = Site(domain=self.site.domain)
        msg = 'Site with this Domain name already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            site.validate_unique()
class JustOtherRouter(object):
    """Database router that permits migrations only on the 'other' alias."""

    def allow_migrate(self, db, app_label, **hints):
        # Only the secondary database may receive migrations.
        is_other = (db == 'other')
        return is_other
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
    # Covers create_default_site(): idempotence, db routing, and SITE_ID pk.
    multi_db = True

    def setUp(self):
        self.app_config = apps.get_app_config('sites')
        # Delete the site created as part of the default migration process.
        Site.objects.all().delete()

    def test_basic(self):
        """
        #15346, #15573 - create_default_site() creates an example site only if
        none exist.
        """
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertIn("Creating example.com", stdout.getvalue())

        # Calling it again is a silent no-op.
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertEqual("", stdout.getvalue())

    @override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
    def test_multi_db_with_router(self):
        """
        #16353, #16828 - The default site creation should respect db routing.
        """
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertFalse(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())

    def test_multi_db(self):
        # Without a restrictive router, both databases get the default site.
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertTrue(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())

    def test_save_another(self):
        """
        #17415 - Another site can be created right after the default one.

        On some backends the sequence needs to be reset after saving with an
        explicit ID. Test that there isn't a sequence collisions by saving
        another site. This test is only meaningful with databases that use
        sequences for automatic primary keys such as PostgreSQL and Oracle.
        """
        create_default_site(self.app_config, verbosity=0)
        Site(domain='example2.com', name='example2.com').save()

    def test_signal(self):
        """
        #23641 - Sending the ``post_migrate`` signal triggers creation of the
        default site.
        """
        post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
        self.assertTrue(Site.objects.exists())

    @override_settings(SITE_ID=35696)
    def test_custom_site_id(self):
        """
        #23945 - The configured ``SITE_ID`` should be respected.
        """
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 35696)

    @override_settings()  # Restore original ``SITE_ID`` afterwards.
    def test_no_site_id(self):
        """
        #24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
        """
        del settings.SITE_ID
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 1)
class MiddlewareTest(TestCase):
    # Verifies CurrentSiteMiddleware attaches the current site to the request.

    def test_request(self):
        """ Makes sure that the request has correct `site` attribute. """
        middleware = CurrentSiteMiddleware()
        request = HttpRequest()
        middleware.process_request(request)
        self.assertEqual(request.site.id, settings.SITE_ID)
| bsd-3-clause |
djudd/avro | lang/py3/avro/schema.py | 14 | 34817 | #!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of Avro schemas.
A schema may be one of:
- A record, mapping field names to field value data;
- An error, equivalent to a record;
- An enum, containing one of a small set of symbols;
- An array of values, all of the same schema;
- A map containing string/value pairs, each of a declared schema;
- A union of other schemas;
- A fixed sized binary object;
- A unicode string;
- A sequence of bytes;
- A 32-bit signed int;
- A 64-bit signed long;
- A 32-bit floating-point float;
- A 64-bit floating-point double;
- A boolean;
- Null.
"""
import abc
import collections
import json
import logging
import re
# ------------------------------------------------------------------------------
# Constants

# Log level more verbose than DEBUG=10, INFO=20, etc.
DEBUG_VERBOSE = 5

# Canonical identifiers of the Avro primitive and complex types.
NULL = 'null'
BOOLEAN = 'boolean'
STRING = 'string'
BYTES = 'bytes'
INT = 'int'
LONG = 'long'
FLOAT = 'float'
DOUBLE = 'double'
FIXED = 'fixed'
ENUM = 'enum'
RECORD = 'record'
ERROR = 'error'
ARRAY = 'array'
MAP = 'map'
UNION = 'union'

# Request and error unions are part of Avro protocols:
REQUEST = 'request'
ERROR_UNION = 'error_union'

PRIMITIVE_TYPES = frozenset([
    NULL,
    BOOLEAN,
    STRING,
    BYTES,
    INT,
    LONG,
    FLOAT,
    DOUBLE,
])

# Types that carry a (possibly namespaced) name and are tracked in Names.
NAMED_TYPES = frozenset([
    FIXED,
    ENUM,
    RECORD,
    ERROR,
])

# Every type identifier accepted by Schema.__init__.
VALID_TYPES = frozenset.union(
    PRIMITIVE_TYPES,
    NAMED_TYPES,
    [
        ARRAY,
        MAP,
        UNION,
        REQUEST,
        ERROR_UNION,
    ],
)

# Property names with reserved meaning in a schema JSON descriptor; any
# other property is preserved as an "other" property.
SCHEMA_RESERVED_PROPS = frozenset([
    'type',
    'name',
    'namespace',
    'fields',   # Record
    'items',    # Array
    'size',     # Fixed
    'symbols',  # Enum
    'values',   # Map
    'doc',
])

# Property names with reserved meaning in a record field descriptor.
FIELD_RESERVED_PROPS = frozenset([
    'default',
    'name',
    'doc',
    'order',
    'type',
])

# Allowed values for a record field's 'order' attribute.
VALID_FIELD_SORT_ORDERS = frozenset([
    'ascending',
    'descending',
    'ignore',
])
# ------------------------------------------------------------------------------
# Exceptions


class Error(Exception):
    """Root of the exception hierarchy raised by this module."""
class AvroException(Error):
    """Generic Avro schema error."""
class SchemaParseException(AvroException):
    """Error while parsing a JSON schema descriptor."""
# ------------------------------------------------------------------------------


class ImmutableDict(dict):
    """Dictionary guaranteed immutable.

    All mutations raise an exception.
    Behaves exactly as a dict otherwise.
    """

    def __init__(self, items=None, **kwargs):
        """Initializes from either a mapping/iterable of items or kwargs.

        Args:
            items: Optional mapping or iterable of key/value pairs.
            **kwargs: Key/value pairs, only allowed when items is None.
        """
        if items is not None:
            super(ImmutableDict, self).__init__(items)
            assert (len(kwargs) == 0)
        else:
            super(ImmutableDict, self).__init__(**kwargs)

    def __setitem__(self, key, value):
        raise Exception(
            'Attempting to map key %r to value %r in ImmutableDict %r'
            % (key, value, self))

    def __delitem__(self, key):
        raise Exception(
            'Attempting to remove mapping for key %r in ImmutableDict %r'
            % (key, self))

    def clear(self):
        raise Exception('Attempting to clear ImmutableDict %r' % self)

    def update(self, items=None, **kwargs):
        # Bug fix: the original message interpolated an undefined name
        # 'args', so calling update() raised NameError instead of the
        # intended Exception. Report the actual arguments instead.
        raise Exception(
            'Attempting to update ImmutableDict %r with items=%r, kwargs=%r'
            % (self, items, kwargs))

    def pop(self, key, default=None):
        raise Exception(
            'Attempting to pop key %r from ImmutableDict %r' % (key, self))

    def popitem(self):
        raise Exception('Attempting to pop item from ImmutableDict %r' % self)
# ------------------------------------------------------------------------------


class Schema(object, metaclass=abc.ABCMeta):
    """Abstract base class for all Schema classes."""

    def __init__(self, type, other_props=None):
        """Initializes a new schema object.

        Args:
            type: Type of the schema to initialize; must be in VALID_TYPES.
            other_props: Optional dictionary of additional properties.

        Raises:
            SchemaParseException: if type is not a valid Avro type.
        """
        # NOTE: 'type' deliberately shadows the builtin to mirror the Avro
        # JSON property name.
        if type not in VALID_TYPES:
            raise SchemaParseException('%r is not a valid Avro type.' % type)

        # All properties of this schema, as a map: property name -> property value
        self._props = {}

        self._props['type'] = type
        self._type = type

        if other_props:
            self._props.update(other_props)

    @property
    def name(self):
        """Returns: the simple name of this schema."""
        return self._props['name']

    @property
    def fullname(self):
        """Returns: the fully qualified name of this schema."""
        # By default, the full name is the simple name.
        # Named schemas override this behavior to include the namespace.
        return self.name

    @property
    def namespace(self):
        """Returns: the namespace this schema belongs to, if any, or None."""
        return self._props.get('namespace', None)

    @property
    def type(self):
        """Returns: the type of this schema."""
        return self._type

    @property
    def doc(self):
        """Returns: the documentation associated to this schema, if any, or None."""
        return self._props.get('doc', None)

    @property
    def props(self):
        """Reports all the properties of this schema.

        Includes all properties, reserved and non reserved.
        JSON properties of this schema are directly generated from this dict.

        Returns:
            A read-only dictionary of properties associated to this schema.
        """
        return ImmutableDict(self._props)

    @property
    def other_props(self):
        """Returns: the dictionary of non-reserved properties."""
        return dict(FilterKeysOut(items=self._props, keys=SCHEMA_RESERVED_PROPS))

    def __str__(self):
        """Returns: the JSON representation of this schema."""
        return json.dumps(self.to_json())

    @abc.abstractmethod
    def to_json(self, names):
        """Converts the schema object into its AVRO specification representation.

        Schema types that have names (records, enums, and fixed) must
        be aware of not re-defining schemas that are already listed
        in the parameter names.
        """
        raise Exception('Cannot run abstract method.')
# ------------------------------------------------------------------------------

# Matches a valid simple (unqualified) Avro name.
_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')

# Matches a full (optionally dotted) Avro name; group 1 captures the
# trailing simple name.
_RE_FULL_NAME = re.compile(
    r'^'
    r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*'  # optional namespace
    r'([A-Za-z_][A-Za-z0-9_]*)'            # name
    r'$'
)
class Name(object):
    """Representation of an Avro name (simple name plus optional namespace)."""

    def __init__(self, name, namespace=None):
        """Parses an Avro name.

        Args:
            name: Avro name to parse (relative or absolute).
            namespace: Optional explicit namespace if the name is relative.

        Raises:
            SchemaParseException: if the resulting full name is invalid.
        """
        # Normalize: namespace is always defined as a string, possibly empty.
        if namespace is None:
            namespace = ''

        if '.' in name:
            # name is absolute, namespace is ignored:
            self._fullname = name

            match = _RE_FULL_NAME.match(self._fullname)
            if match is None:
                raise SchemaParseException(
                    'Invalid absolute schema name: %r.' % self._fullname)

            self._name = match.group(1)
            self._namespace = self._fullname[:-(len(self._name) + 1)]

        else:
            # name is relative, combine with explicit namespace:
            self._name = name
            self._namespace = namespace
            self._fullname = '%s.%s' % (self._namespace, self._name)

            # Validate the fullname:
            if _RE_FULL_NAME.match(self._fullname) is None:
                raise SchemaParseException(
                    'Invalid schema name %r infered from name %r and namespace %r.'
                    % (self._fullname, self._name, self._namespace))

    def __eq__(self, other):
        if not isinstance(other, Name):
            return False
        return (self.fullname == other.fullname)

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ makes instances
        # unhashable in Python 3, so Name objects could not be used as dict
        # keys or set members. Hash on the same key that __eq__ compares.
        return hash(self._fullname)

    @property
    def simple_name(self):
        """Returns: the simple name part of this name."""
        return self._name

    @property
    def namespace(self):
        """Returns: this name's namespace, possibly the empty string."""
        return self._namespace

    @property
    def fullname(self):
        """Returns: the full name (always contains a period '.')."""
        return self._fullname
# ------------------------------------------------------------------------------


class Names(object):
    """Tracks Avro named schemas and default namespace during parsing."""

    def __init__(self, default_namespace=None, names=None):
        """Initializes a new name tracker.

        Args:
            default_namespace: Optional default namespace.
            names: Optional initial mapping of known named schemas.
        """
        if names is None:
            names = {}
        self._names = names
        self._default_namespace = default_namespace

    @property
    def names(self):
        """Returns: the mapping of known named schemas."""
        return self._names

    @property
    def default_namespace(self):
        """Returns: the default namespace, if any, or None."""
        return self._default_namespace

    def NewWithDefaultNamespace(self, namespace):
        """Creates a new name tracker from this tracker, but with a new default ns.

        Note: the mapping of known names is shared with this tracker, not copied.

        Args:
            namespace: New default namespace to use.

        Returns:
            New name tracker with the specified default namespace.
        """
        return Names(names=self._names, default_namespace=namespace)

    def GetName(self, name, namespace=None):
        """Resolves the Avro name according to this name tracker's state.

        Args:
            name: Name to resolve (absolute or relative).
            namespace: Optional explicit namespace.

        Returns:
            The specified name, resolved according to this tracker.
        """
        if namespace is None:
            namespace = self._default_namespace
        return Name(name=name, namespace=namespace)

    def has_name(self, name, namespace=None):
        # True if a schema with this (resolved) full name is registered.
        avro_name = self.GetName(name=name, namespace=namespace)
        return avro_name.fullname in self._names

    def get_name(self, name, namespace=None):
        # Returns the registered schema for this name, or None.
        avro_name = self.GetName(name=name, namespace=namespace)
        return self._names.get(avro_name.fullname, None)

    def GetSchema(self, name, namespace=None):
        """Resolves an Avro schema by name.

        Args:
            name: Name (relative or absolute) of the Avro schema to look up.
            namespace: Optional explicit namespace.

        Returns:
            The schema with the specified name, if any, or None.
        """
        avro_name = self.GetName(name=name, namespace=namespace)
        return self._names.get(avro_name.fullname, None)

    def prune_namespace(self, properties):
        """Given a property map, returns it with 'namespace' removed when it
        matches this tracker's default namespace.
        """
        if self.default_namespace is None:
            # This tracker has no default namespace -- no change.
            return properties
        if 'namespace' not in properties:
            # The properties carry no namespace -- no change.
            return properties
        if properties['namespace'] != self.default_namespace:
            # Namespaces differ -- leave the properties alone.
            return properties
        # Both namespaces match, so the property is redundant: drop it
        # from a copy of the map.
        prunable = properties.copy()
        del(prunable['namespace'])
        return prunable

    def Register(self, schema):
        """Registers a new named schema in this tracker.

        Args:
            schema: Named Avro schema to register in this tracker.

        Raises:
            SchemaParseException: if the name is reserved or already registered.
        """
        if schema.fullname in VALID_TYPES:
            raise SchemaParseException(
                '%s is a reserved type name.' % schema.fullname)
        if schema.fullname in self.names:
            raise SchemaParseException(
                'Avro name %r already exists.' % schema.fullname)
        logging.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
        self._names[schema.fullname] = schema
# ------------------------------------------------------------------------------


class NamedSchema(Schema):
    """Abstract base class for named schemas.

    Named schemas are enumerated in NAMED_TYPES.
    """

    def __init__(
        self,
        type,
        name,
        namespace=None,
        names=None,
        other_props=None,
    ):
        """Initializes a new named schema object.

        Side effect: registers this schema in the names tracker.

        Args:
            type: Type of the named schema; must be in NAMED_TYPES.
            name: Name (absolute or relative) of the schema.
            namespace: Optional explicit namespace if name is relative.
            names: Tracker to resolve and register Avro names.
            other_props: Optional map of additional properties of the schema.

        Raises:
            SchemaParseException: if the name is invalid, reserved, or
                already registered in the tracker.
        """
        assert (type in NAMED_TYPES), ('Invalid named type: %r' % type)
        self._avro_name = names.GetName(name=name, namespace=namespace)

        super(NamedSchema, self).__init__(type, other_props)

        names.Register(self)

        self._props['name'] = self.name
        if self.namespace:
            self._props['namespace'] = self.namespace

    @property
    def avro_name(self):
        """Returns: the Name object describing this schema's name."""
        return self._avro_name

    @property
    def name(self):
        # Simple (unqualified) name.
        return self._avro_name.simple_name

    @property
    def namespace(self):
        # Namespace, possibly the empty string.
        return self._avro_name.namespace

    @property
    def fullname(self):
        # Fully-qualified (dotted) name.
        return self._avro_name.fullname

    def name_ref(self, names):
        """Reports this schema name relative to the specified name tracker.

        Args:
            names: Avro name tracker to relativise this schema name against.

        Returns:
            This schema name, relativised against the specified name tracker.
        """
        if self.namespace == names.default_namespace:
            return self.name
        else:
            return self.fullname
# ------------------------------------------------------------------------------

# Sentinel distinguishing "no default given" from an explicit default of None.
_NO_DEFAULT = object()


class Field(object):
    """Representation of the schema of a field in a record."""

    def __init__(
        self,
        type,
        name,
        index,
        has_default,
        default=_NO_DEFAULT,
        order=None,
        names=None,
        doc=None,
        other_props=None
    ):
        """Initializes a new Field object.

        Args:
            type: Avro schema of the field.
            name: Name of the field; must be a non-empty string.
            index: 0-based position of the field.
            has_default: Whether an explicit default value was provided.
            default: Default value of the field, used when has_default is true.
            order: Optional sort order; one of VALID_FIELD_SORT_ORDERS.
            names: Not used by this constructor (accepted for call-site
                compatibility with the schema constructors).
            doc: Optional documentation string for the field.
            other_props: Optional map of additional (non-reserved) properties.

        Raises:
            SchemaParseException: on an invalid name or sort order.
        """
        if (not isinstance(name, str)) or (len(name) == 0):
            raise SchemaParseException('Invalid record field name: %r.' % name)
        if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):
            raise SchemaParseException('Invalid record field order: %r.' % order)

        # All properties of this record field:
        self._props = {}

        self._has_default = has_default
        if other_props:
            self._props.update(other_props)

        self._index = index
        self._type = self._props['type'] = type
        self._name = self._props['name'] = name

        # TODO: check to ensure default is valid
        if has_default:
            self._props['default'] = default

        if order is not None:
            self._props['order'] = order

        if doc is not None:
            self._props['doc'] = doc

    @property
    def type(self):
        """Returns: the schema of this field."""
        return self._type

    @property
    def name(self):
        """Returns: this field name."""
        return self._name

    @property
    def index(self):
        """Returns: the 0-based index of this field in the record."""
        return self._index

    @property
    def default(self):
        # NOTE: raises KeyError when no default was provided; check
        # has_default before reading this property.
        return self._props['default']

    @property
    def has_default(self):
        # Whether an explicit default value was provided.
        return self._has_default

    @property
    def order(self):
        # Sort order of this field, if any, or None.
        return self._props.get('order', None)

    @property
    def doc(self):
        # Documentation of this field, if any, or None.
        return self._props.get('doc', None)

    @property
    def props(self):
        # All properties (reserved and custom) of this field.
        return self._props

    @property
    def other_props(self):
        # Only the non-reserved (custom) properties.
        return FilterKeysOut(items=self._props, keys=FIELD_RESERVED_PROPS)

    def __str__(self):
        # JSON representation of this field.
        return json.dumps(self.to_json())

    def to_json(self, names=None):
        # Converts this field into its JSON-serializable representation,
        # recursing into the field's type schema.
        if names is None:
            names = Names()
        to_dump = self.props.copy()
        to_dump['type'] = self.type.to_json(names)
        return to_dump

    def __eq__(self, that):
        # Structural equality via the JSON representation.
        to_cmp = json.loads(str(self))
        return to_cmp == json.loads(str(that))
# ------------------------------------------------------------------------------
# Primitive Types


class PrimitiveSchema(Schema):
    """Schema of a primitive Avro type.

    Valid primitive types are defined in PRIMITIVE_TYPES.
    """

    def __init__(self, type, other_props=None):
        """Initializes a new schema object for the specified primitive type.

        Args:
            type: Type of the schema to construct. Must be primitive.
            other_props: Optional map of additional properties.

        Raises:
            AvroException: if type is not in PRIMITIVE_TYPES.
        """
        if type not in PRIMITIVE_TYPES:
            raise AvroException('%r is not a valid primitive type.' % type)
        super(PrimitiveSchema, self).__init__(type, other_props=other_props)

    @property
    def name(self):
        """Returns: the simple name of this schema."""
        # The name of a primitive type is the type itself.
        return self.type

    def to_json(self, names=None):
        # With no extra properties, a primitive schema serializes to its bare
        # type name (e.g. "int"); otherwise to the full property map.
        if len(self.props) == 1:
            return self.fullname
        else:
            return self.props

    def __eq__(self, that):
        # NOTE(review): assumes `that` also exposes a `props` mapping;
        # comparing against a non-schema object raises AttributeError.
        return self.props == that.props
# ------------------------------------------------------------------------------
# Complex Types (non-recursive)


class FixedSchema(NamedSchema):
    """Avro 'fixed' type: a named binary blob of a fixed byte length."""

    def __init__(self, name, namespace, size, names=None, other_props=None):
        """Builds a fixed schema.

        Args:
            name: Simple or absolute name of the fixed type.
            namespace: Optional namespace when the name is relative.
            size: Number of bytes of a fixed value; must be an int.
            names: Tracker to resolve and register Avro names.
            other_props: Optional map of additional properties.

        Raises:
            AvroException: if size is not an integer.
        """
        if not isinstance(size, int):
            raise AvroException(
                'Fixed Schema requires a valid integer for size property.')
        super(FixedSchema, self).__init__(
            type=FIXED,
            name=name,
            namespace=namespace,
            names=names,
            other_props=other_props,
        )
        self._props['size'] = size

    @property
    def size(self):
        """Returns: the size of this fixed schema, in bytes."""
        return self._props['size']

    def to_json(self, names=None):
        """Serializes to the Avro JSON form, emitting a name reference when
        this schema was already written through the given tracker."""
        if names is None:
            names = Names()
        if self.fullname in names.names:
            return self.name_ref(names)
        names.names[self.fullname] = self
        return names.prune_namespace(self.props)

    def __eq__(self, that):
        # Structural equality over the full property maps.
        return self.props == that.props
# ------------------------------------------------------------------------------


class EnumSchema(NamedSchema):
    """Avro enum: a named, ordered collection of unique string symbols."""

    def __init__(self, name, namespace, symbols, names=None, doc=None,
                 other_props=None):
        """Builds an enumeration schema.

        Args:
            name: Simple name of this enumeration.
            namespace: Optional namespace.
            symbols: Ordered list of symbols defined in this enumeration.
            names: Tracker to resolve and register Avro names.
            doc: Optional documentation string.
            other_props: Optional map of additional properties.

        Raises:
            AvroException: if symbols contain duplicates or non-strings.
        """
        symbols = tuple(symbols)
        has_duplicates = len(frozenset(symbols)) != len(symbols)
        all_strings = all(isinstance(symbol, str) for symbol in symbols)
        if has_duplicates or not all_strings:
            raise AvroException(
                'Invalid symbols for enum schema: %r.' % (symbols,))

        super(EnumSchema, self).__init__(
            type=ENUM,
            name=name,
            namespace=namespace,
            names=names,
            other_props=other_props,
        )

        self._props['symbols'] = symbols
        if doc is not None:
            self._props['doc'] = doc

    @property
    def symbols(self):
        """Returns: the symbols defined in this enum."""
        return self._props['symbols']

    def to_json(self, names=None):
        """Serializes to the Avro JSON form, emitting a name reference when
        this schema was already written through the given tracker."""
        if names is None:
            names = Names()
        if self.fullname in names.names:
            return self.name_ref(names)
        names.names[self.fullname] = self
        return names.prune_namespace(self.props)

    def __eq__(self, that):
        # Structural equality over the full property maps.
        return self.props == that.props
# ------------------------------------------------------------------------------
# Complex Types (recursive)


class ArraySchema(Schema):
    """Avro array: a sequence of values sharing a single item schema."""

    def __init__(self, items, other_props=None):
        """Builds an array schema.

        Args:
            items: Avro schema of the array items.
            other_props: Optional map of additional properties.
        """
        super(ArraySchema, self).__init__(type=ARRAY, other_props=other_props)
        self._items_schema = items
        self._props['items'] = items

    @property
    def items(self):
        """Returns: the schema shared by every item of the array."""
        return self._items_schema

    def to_json(self, names=None):
        """Serializes to the Avro JSON form, recursing into the item schema."""
        if names is None:
            names = Names()
        json_repr = self.props.copy()
        json_repr['items'] = self._items_schema.to_json(names)
        return json_repr

    def __eq__(self, that):
        # Structural equality via the JSON representations.
        return json.loads(str(self)) == json.loads(str(that))
# ------------------------------------------------------------------------------
class MapSchema(Schema):
    """Schema of a map."""

    def __init__(self, values, other_props=None):
        """Initializes a new map schema object.

        Args:
            values: Avro schema of the map values.
            other_props: Optional map of additional properties.
        """
        super(MapSchema, self).__init__(
            type=MAP,
            other_props=other_props,
        )
        # Keep the value schema both as an attribute and as a property entry.
        self._values_schema = values
        self._props['values'] = values

    @property
    def values(self):
        """Returns: the schema of the values in this map."""
        return self._values_schema

    def to_json(self, names=None):
        """Serializes the map schema, recursing into the value schema."""
        names = Names() if names is None else names
        descriptor = self.props.copy()
        descriptor['values'] = self.values.to_json(names)
        return descriptor

    def __eq__(self, that):
        """Structural equality via the canonical JSON form of both schemas."""
        return json.loads(str(self)) == json.loads(str(that))
# ------------------------------------------------------------------------------
class UnionSchema(Schema):
    """Schema of a union."""

    def __init__(self, schemas):
        """Initializes a new union schema object.

        Args:
            schemas: Ordered collection of schema branches in the union.

        Raises:
            AvroException: on duplicate named branches, duplicate unnamed
                branch types, or a directly nested union branch.
        """
        super(UnionSchema, self).__init__(type=UNION)
        self._schemas = tuple(schemas)

        def branch_listing():
            # Shared formatter used by every validation error below.
            return ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))

        # All named schema names are unique:
        named_branches = tuple(
            schema for schema in self._schemas if schema.type in NAMED_TYPES)
        unique_names = frozenset(schema.fullname for schema in named_branches)
        if len(unique_names) != len(named_branches):
            raise AvroException(
                'Invalid union branches with duplicate schema name:%s'
                % branch_listing())

        # Types are unique within unnamed schemas, and union is not allowed:
        unnamed_branches = tuple(
            schema for schema in self._schemas if schema.type not in NAMED_TYPES)
        unique_types = frozenset(schema.type for schema in unnamed_branches)
        if UNION in unique_types:
            raise AvroException(
                'Invalid union branches contain other unions:%s'
                % branch_listing())
        if len(unique_types) != len(unnamed_branches):
            raise AvroException(
                'Invalid union branches with duplicate type:%s'
                % branch_listing())

    @property
    def schemas(self):
        """Returns: the ordered list of schema branches in the union."""
        return self._schemas

    def to_json(self, names=None):
        """Serializes the union as a JSON list of branch descriptors."""
        if names is None:
            names = Names()
        return [schema.to_json(names) for schema in self.schemas]

    def __eq__(self, that):
        """Structural equality via the canonical JSON form of both schemas."""
        return json.loads(str(self)) == json.loads(str(that))
# ------------------------------------------------------------------------------
class ErrorUnionSchema(UnionSchema):
    """Schema representing the declared errors of a protocol message."""

    def __init__(self, schemas):
        """Initializes an error-union schema.

        Args:
            schemas: Collection of error schemas.
        """
        # TODO: check that string isn't already listed explicitly as an error.
        # Every error union implicitly carries "string" for system errors:
        super(ErrorUnionSchema, self).__init__(
            schemas=[PrimitiveSchema(type=STRING)] + list(schemas))

    def to_json(self, names=None):
        """Serializes the union, hiding the implicit system-error branch."""
        if names is None:
            names = Names()
        # The "string" branch was injected by __init__; don't expose it.
        return [schema.to_json(names)
                for schema in self.schemas
                if schema.type != STRING]
# ------------------------------------------------------------------------------
class RecordSchema(NamedSchema):
    """Schema of a record (also used for ERROR and protocol REQUEST types)."""

    @staticmethod
    def _MakeField(index, field_desc, names):
        """Builds one field schema from a field JSON descriptor.

        Args:
            index: 0-based index of the field in the record.
            field_desc: JSON descriptor of a record field.
            names: Avro schema tracker.
        Return:
            The field schema.
        """
        field_schema = SchemaFromJSONData(
            json_data=field_desc['type'],
            names=names,
        )
        # Carry through any non-reserved properties declared on the field:
        other_props = (
            dict(FilterKeysOut(items=field_desc, keys=FIELD_RESERVED_PROPS)))
        return Field(
            type=field_schema,
            name=field_desc['name'],
            index=index,
            has_default=('default' in field_desc),
            default=field_desc.get('default', _NO_DEFAULT),
            order=field_desc.get('order', None),
            names=names,
            doc=field_desc.get('doc', None),
            other_props=other_props,
        )

    @staticmethod
    def _MakeFieldList(field_desc_list, names):
        """Builds field schemas from a list of field JSON descriptors.

        Args:
            field_desc_list: collection of field JSON descriptors.
            names: Avro schema tracker.
        Yields:
            Field schemas, in declaration order.
        """
        for index, field_desc in enumerate(field_desc_list):
            yield RecordSchema._MakeField(index, field_desc, names)

    @staticmethod
    def _MakeFieldMap(fields):
        """Builds the field map, guaranteeing field name unicity.

        Args:
            fields: iterable of field schema.
        Returns:
            A read-only map of field schemas, indexed by name.
        Raises:
            SchemaParseException: if two fields share the same name.
        """
        field_map = {}
        for field in fields:
            if field.name in field_map:
                # Bug fix: the original message interpolated the undefined
                # name 'field_desc_list', so every duplicate-field error
                # surfaced as a NameError instead of this exception.
                raise SchemaParseException(
                    'Duplicate field name %r in list %r.'
                    % (field.name, sorted(field_map)))
            field_map[field.name] = field
        return ImmutableDict(field_map)

    def __init__(
        self,
        name,
        namespace,
        fields=None,
        make_fields=None,
        names=None,
        record_type=RECORD,
        doc=None,
        other_props=None
    ):
        """Initializes a new record schema object.

        Args:
            name: Name of the record (absolute or relative).
            namespace: Optional namespace the record belongs to, if name is relative.
            fields: collection of fields to add to this record.
                Exactly one of fields or make_fields must be specified.
            make_fields: function creating the fields that belong to the record.
                The function signature is: make_fields(names) -> ordered field list.
                Exactly one of fields or make_fields must be specified.
            names: Avro schema tracker.
            record_type: Type of the record: one of RECORD, ERROR or REQUEST.
                Protocol requests are not named.
            doc: Optional documentation string.
            other_props: Optional map of additional properties.

        Raises:
            SchemaParseException: on invalid record_type or duplicate fields.
        """
        if record_type == REQUEST:
            # Protocol requests are not named: bypass NamedSchema.__init__.
            super(NamedSchema, self).__init__(
                type=REQUEST,
                other_props=other_props,
            )
        elif record_type in [RECORD, ERROR]:
            # Register this record name in the tracker:
            super(RecordSchema, self).__init__(
                type=record_type,
                name=name,
                namespace=namespace,
                names=names,
                other_props=other_props,
            )
        else:
            raise SchemaParseException(
                'Invalid record type: %r.' % record_type)

        if record_type in [RECORD, ERROR]:
            # Fields resolve in the namespace of the record itself:
            avro_name = names.GetName(name=name, namespace=namespace)
            nested_names = names.NewWithDefaultNamespace(namespace=avro_name.namespace)
        elif record_type == REQUEST:
            # Protocol request has no name: no need to change default namespace:
            nested_names = names

        if fields is None:
            fields = make_fields(names=nested_names)
        else:
            assert (make_fields is None)
        self._fields = tuple(fields)
        self._field_map = RecordSchema._MakeFieldMap(self._fields)
        # Store the materialized tuple, not the raw (possibly one-shot)
        # iterable that was passed in, so props stays consistent with
        # the 'fields' attribute.
        self._props['fields'] = self._fields
        if doc is not None:
            self._props['doc'] = doc

    @property
    def fields(self):
        """Returns: the field schemas, as an ordered tuple."""
        return self._fields

    @property
    def field_map(self):
        """Returns: a read-only map of the field schemas index by field names."""
        return self._field_map

    def to_json(self, names=None):
        """Serializes the record; emits a name reference after first use."""
        if names is None:
            names = Names()
        # Request records don't have names
        if self.type == REQUEST:
            return [f.to_json(names) for f in self.fields]
        if self.fullname in names.names:
            return self.name_ref(names)
        else:
            names.names[self.fullname] = self
        to_dump = names.prune_namespace(self.props.copy())
        to_dump['fields'] = [f.to_json(names) for f in self.fields]
        return to_dump

    def __eq__(self, that):
        """Structural equality via the canonical JSON form of both schemas."""
        to_cmp = json.loads(str(self))
        return to_cmp == json.loads(str(that))
# ------------------------------------------------------------------------------
# Module functions
def FilterKeysOut(items, keys):
    """Filters a collection of (key, value) items.

    Excludes any item whose key belongs to keys.

    Args:
        items: Dictionary of items to filter the keys out of.
        keys: Keys to filter out.
    Yields:
        The (key, value) pairs whose key is not in keys.
    """
    for key, value in items.items():
        if key not in keys:
            yield (key, value)
# ------------------------------------------------------------------------------
def _SchemaFromJSONString(json_string, names):
    """Builds a schema from a JSON string descriptor.

    A string is either a primitive type name or a reference to a
    previously defined named schema.
    """
    if json_string in PRIMITIVE_TYPES:
        return PrimitiveSchema(type=json_string)
    # Not a primitive: must reference a known named schema.
    schema = names.GetSchema(name=json_string)
    if schema is None:
        raise SchemaParseException(
            'Unknown named schema %r, known names: %r.'
            % (json_string, sorted(names.names)))
    return schema
def _SchemaFromJSONArray(json_array, names):
    """A JSON array describes a union: each element becomes a branch."""
    branches = (SchemaFromJSONData(json_data=desc, names=names)
                for desc in json_array)
    return UnionSchema(branches)
def _SchemaFromJSONObject(json_object, names):
    """Builds an Avro schema from a JSON object (dict) descriptor.

    Dispatches on the descriptor's 'type' entry: primitive, named
    (fixed/enum/record/error), or unnamed complex (array/map/error-union).

    Args:
        json_object: JSON dict describing the schema; must contain 'type'.
        names: Tracker of known named schemas and the default namespace.
    Returns:
        The parsed Schema instance.
    Raises:
        SchemaParseException: on a missing 'type' or malformed descriptor.
    """
    # NOTE: 'type' intentionally shadows the builtin within this function.
    type = json_object.get('type')
    if type is None:
        raise SchemaParseException(
            'Avro schema JSON descriptor has no "type" property: %r' % json_object)

    # Non-reserved properties are carried through to the schema object:
    other_props = dict(
        FilterKeysOut(items=json_object, keys=SCHEMA_RESERVED_PROPS))

    if type in PRIMITIVE_TYPES:
        # FIXME should not ignore other properties
        return PrimitiveSchema(type, other_props=other_props)

    elif type in NAMED_TYPES:
        name = json_object.get('name')
        namespace = json_object.get('namespace', names.default_namespace)
        if type == FIXED:
            size = json_object.get('size')
            return FixedSchema(name, namespace, size, names, other_props)
        elif type == ENUM:
            symbols = json_object.get('symbols')
            doc = json_object.get('doc')
            return EnumSchema(name, namespace, symbols, names, doc, other_props)
        elif type in [RECORD, ERROR]:
            field_desc_list = json_object.get('fields', ())

            def MakeFields(names):
                # Deferred so fields resolve in the record's own namespace.
                return tuple(RecordSchema._MakeFieldList(field_desc_list, names))

            return RecordSchema(
                name=name,
                namespace=namespace,
                make_fields=MakeFields,
                names=names,
                record_type=type,
                doc=json_object.get('doc'),
                other_props=other_props,
            )
        else:
            # Unreachable unless NAMED_TYPES gains a type with no branch here.
            raise Exception('Internal error: unknown type %r.' % type)

    elif type in VALID_TYPES:
        # Unnamed, non-primitive Avro type:
        if type == ARRAY:
            items_desc = json_object.get('items')
            if items_desc is None:
                raise SchemaParseException(
                    'Invalid array schema descriptor with no "items" : %r.'
                    % json_object)
            return ArraySchema(
                items=SchemaFromJSONData(items_desc, names),
                other_props=other_props,
            )
        elif type == MAP:
            values_desc = json_object.get('values')
            if values_desc is None:
                raise SchemaParseException(
                    'Invalid map schema descriptor with no "values" : %r.'
                    % json_object)
            return MapSchema(
                values=SchemaFromJSONData(values_desc, names=names),
                other_props=other_props,
            )
        elif type == ERROR_UNION:
            # 'declared_errors' is asserted present; presumably injected by
            # protocol parsing rather than user input -- confirm upstream.
            error_desc_list = json_object.get('declared_errors')
            assert (error_desc_list is not None)
            error_schemas = map(
                lambda desc: SchemaFromJSONData(desc, names=names),
                error_desc_list)
            return ErrorUnionSchema(schemas=error_schemas)
        else:
            raise Exception('Internal error: unknown type %r.' % type)

    raise SchemaParseException(
        'Invalid JSON descriptor for an Avro schema: %r' % json_object)
# Parsers for the JSON data types:
_JSONDataParserTypeMap = {
str: _SchemaFromJSONString,
list: _SchemaFromJSONArray,
dict: _SchemaFromJSONObject,
}
def SchemaFromJSONData(json_data, names=None):
    """Builds an Avro Schema from its JSON descriptor.

    Args:
        json_data: JSON data representing the descriptor of the Avro schema.
        names: Optional tracker for Avro named schemas.
    Returns:
        The Avro schema parsed from the JSON descriptor.
    Raises:
        SchemaParseException: if the descriptor is invalid.
    """
    names = Names() if names is None else names
    # Dispatch on the exact Python type of the JSON value (str/list/dict):
    parser = _JSONDataParserTypeMap.get(type(json_data))
    if parser is None:
        raise SchemaParseException(
            'Invalid JSON descriptor for an Avro schema: %r.' % json_data)
    return parser(json_data, names=names)
# ------------------------------------------------------------------------------
def Parse(json_string):
    """Constructs a Schema from its JSON descriptor in text form.

    Args:
        json_string: String representation of the JSON descriptor of the schema.
    Returns:
        The parsed schema.
    Raises:
        SchemaParseException: on JSON parsing error,
            or if the JSON descriptor is invalid.
    """
    try:
        json_data = json.loads(json_string)
    except Exception as exn:
        raise SchemaParseException(
            'Error parsing schema from JSON: %r. '
            'Error message: %r.'
            % (json_string, exn))
    # Named-schema tracking starts from a fresh, empty registry:
    return SchemaFromJSONData(json_data, Names())
| apache-2.0 |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/protorpc/demos/appstats/protorpc_appstats/__init__.py | 20 | 4951 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cStringIO
import logging
import os
from protorpc import descriptor
from protorpc import messages
from protorpc import protobuf
from protorpc import remote
from protorpc import stub
from google.appengine.api import memcache
from google.appengine.ext.appstats import recording
# Import contents of appstats.descriptor in to this module from binary appstats
# protobuf descriptor. Definitions are imported into module apphosting.
stub.import_file_set(os.path.join(os.path.dirname(__file__),
'appstats.descriptor'))
import apphosting
class Summary(messages.Message):
  """Response for AppStatsService.get_summary.

  Fields:
    stats: List of RequestStatProto objects summarizing application activity.
  """
  # Repeated field: one RequestStatProto per recorded request summary.
  stats = messages.MessageField(apphosting.RequestStatProto, 1, repeated=True)
class GetDetailsRequest(messages.Message):
  """Request for AppStatsService.get_details.

  Fields:
    timestamp: Timestamp of appstats detail to retrieve.
  """
  # NOTE(review): get_details scales this by 0.001, so it is presumably
  # a millisecond timestamp -- confirm against callers.
  timestamp = messages.IntegerField(1, required=True)
class Details(messages.Message):
  """Response for AppStatsService.get_details.

  Fields:
    stat: Individual stat details if found, else None.
  """
  # Optional field: left unset when no recording matches the timestamp.
  stat = messages.MessageField(apphosting.RequestStatProto, 1)
# TODO(rafek): Remove this function when recording.load_summary_protos is
# refactored in the App Engine SDK.
def load_summary_protos():
  """Load all valid summary records from memcache.

  Returns:
    A list of RequestStatProto instances, in reverse chronological order
    (i.e. most recent first).

  NOTE: This is limited to returning at most config.KEY_MODULUS records,
  since there are only that many distinct keys.  See also make_key().
  """
  # Build the fixed ring of memcache keys that appstats rotates through.
  tmpl = '%s%s%s' % (recording.config.KEY_PREFIX,
                     recording.config.KEY_TEMPLATE,
                     recording.config.PART_SUFFIX)
  keys = [tmpl % i
          for i in
          range(0, recording.config.KEY_DISTANCE * recording.config.KEY_MODULUS,
                recording.config.KEY_DISTANCE)]
  # One batched memcache round-trip for all candidate keys.
  results = memcache.get_multi(keys, namespace=recording.config.KEY_NAMESPACE)
  records = []
  for rec in results.itervalues():
    try:
      pb = protobuf.decode_message(apphosting.RequestStatProto, rec)
    except Exception, err:
      # Corrupt or stale entries are skipped, not fatal.
      logging.warn('Bad record: %s', err)
    else:
      records.append(pb)
  logging.info('Loaded %d raw records, %d valid', len(results), len(records))
  # Sorts by time, newest first.
  records.sort(key=lambda pb: -pb.start_timestamp_milliseconds)
  return records
# TODO(rafek): Remove this function when recording.load_full_protos is
# refactored in the App Engine SDK.
def load_full_proto(timestamp):
  """Load the full record for a given timestamp.

  Args:
    timestamp: The start_timestamp of the record, as a float in seconds
      (see make_key() for details).

  Returns:
    A RequestStatProto instance if the record exists and can be loaded;
    None otherwise.
  """
  full_key = recording.make_key(timestamp) + recording.config.FULL_SUFFIX
  full_binary = memcache.get(full_key, namespace=recording.config.KEY_NAMESPACE)
  if full_binary is None:
    # Entry was evicted or never recorded.
    logging.info('No full record at %s', full_key)
    return None
  try:
    full = protobuf.decode_message(apphosting.RequestStatProto, full_binary)
  except Exception, err:
    # Treat undecodable payloads the same as missing ones.
    logging.warn('Bad full record at %s: %s', full_key, err)
    return None
  # Keys are derived from a bounded key space, so verify the timestamp to
  # detect a different record living under the same key.
  if full.start_timestamp_milliseconds != int(timestamp * 1000):
    logging.warn('Hash collision, record at %d has timestamp %d',
                 int(timestamp * 1000), full.start_timestamp_milliseconds)
    return None  # Hash collision -- the requested record no longer exists.
  return full
class AppStatsService(remote.Service):
  """Service for getting access to AppStats data."""

  @remote.method(response_type=Summary)
  def get_summary(self, request):
    """Get appstats summary.

    Returns:
      Summary message holding all summary records found in memcache.
    """
    response = Summary()
    response.stats = load_summary_protos()
    return response

  @remote.method(GetDetailsRequest, Details)
  def get_details(self, request):
    """Get appstats details for a particular timestamp.

    Args:
      request: GetDetailsRequest carrying the timestamp to look up.

    Returns:
      Details message; its 'stat' field is unset when no record matches.
    """
    response = Details()
    # Request timestamps are scaled to the seconds-based recording keys.
    recording_timestamp = request.timestamp * 0.001
    # Bug fix: this is routine tracing, not a failure; the original logged
    # it at ERROR level, polluting error monitoring on every request.
    logging.info('Fetching recording from %f', recording_timestamp)
    response.stat = load_full_proto(recording_timestamp)
    return response
| bsd-3-clause |
slz/delidded-kernel-n900j-note3 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once after the last event: report event types that had no
    # dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for irq:softirq_entry: print the common prefix, the
    # non-argument common fields, then the softirq vector symbolically.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    # Trailing comma suppresses the extra newline (Python 2 print).
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc: print the common prefix, the non-argument
    # common fields, then the allocation details with gfp flags decoded.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  'unhandled' is an
    # autodict: the first += on a fresh key raises TypeError, which seeds
    # the counter at 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Fixed-width prefix shared by every event handler; the trailing comma
    # keeps the cursor on the same output line (Python 2 print).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # The common_* accessors come from perf_trace_context and require the
    # raw event context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print an "event -> count" table for events seen without a handler;
    # stay silent if every event was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
debian-live/live-magic | tests/test_all.py | 1 | 1304 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# live-magic - GUI frontend to create Debian LiveCDs, etc.
# Copyright (C) 2007-2010 Chris Lamb <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import unittest
def suite():
    """Collect every test_*.py module under the current tree into one suite."""
    tests = unittest.TestSuite()
    for _, _, filenames in os.walk('.'):
        for name in filter(is_test, filenames):
            # Strip the '.py' extension to obtain the importable module name.
            tests.addTests(unittest.defaultTestLoader.loadTestsFromName(name[:-3]))
    return tests
def is_test(filename):
    """Return True iff filename looks like a unit-test module (test_*.py)."""
    return filename.endswith('.py') and filename.startswith('test_')
if __name__ == "__main__":
    # Make the current directory importable so test modules resolve by name.
    sys.path.insert(0, '.')
    # Run all tests discovered by the suite() factory above.
    unittest.main(defaultTest="suite")
| gpl-3.0 |
VitalPet/c2c-rd-addons | stock_invoice_service/__openerp__.py | 4 | 1615 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest: metadata consumed by the addons loader.
{ 'sequence': 500,  # load-order priority among installed modules
    "name" : "Stock Invoice Service",
    "version" : "1.1",
    "author" : "ChriCar Beteiligungs- und Beratungs- GmbH",
    "category": 'Sales Management',
    'complexity': "normal",
    "description": """
Invoices Services ordered together with products which are invoiced based on pickings
Sets invoice policy to manual for SO with only services and picking for all others
this should be made configurable
""",
    'website': 'http://www.camptocamp.com',
    "depends" : ["sale"],  # requires the core sale module
    'init_xml': [],
    'data': [],
    'demo_xml': [],
    'installable': False,  # module is currently disabled for installation
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benzmuircroft/REWIRE.io | test/functional/p2p_zpos_fakestake_accepted.py | 1 | 4397 | #!/usr/bin/env python3
# Copyright (c) 2019 The PIVX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted.
'''
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import PIVX_FakeStakeTest
from time import sleep
class zPoSFakeStakeAccepted(PIVX_FakeStakeTest):
    def set_test_params(self):
        ''' Setup test environment
        :param:
        :return:
        '''
        # Fresh regtest chain, single node, staking enabled with zPIV stakes.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-staking=1', '-debug=net', '-zpivstake']] * self.num_nodes

    def run_test(self):
        self.description = "Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted."
        self.init_test()

        DENOM_TO_USE = 1000        # zc denomination
        INITAL_MINED_BLOCKS = 321  # blocks mined to reach zPoS activation
        MORE_MINED_BLOCKS = 301    # extra blocks mined before spending
        FORK_DEPTH = 75            # depth range for the forked spam blocks
        self.NUM_BLOCKS = 2

        # 1) Starting mining blocks
        self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)
        sleep(2)

        # 2) Collect the possible prevouts and mint zerocoins with those
        self.log.info("Collecting all unspent coins which we generated from mining...")
        balance = self.node.getbalance("*", 100)

        self.log.info("Minting zerocoins...")
        initial_mints = 0
        while balance > DENOM_TO_USE:
            try:
                self.node.mintzerocoin(DENOM_TO_USE)
            except JSONRPCException:
                # Wallet can no longer fund a mint of this denomination.
                break
            sleep(1)
            initial_mints += 1
            self.node.generate(1)
            sleep(1)
            if initial_mints % 5 == 0:
                self.log.info("Minted %d coins" % initial_mints)
            if initial_mints >= 20:
                # Cap the setup work; 20 mints are enough for the test.
                break
            balance = self.node.getbalance("*", 100)
        self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
        sleep(2)

        # 3) mine more blocks
        self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)
        mints = self.node.listmintedzerocoins(True, True)
        sleep(1)
        mints_hashes = [x["serial hash"] for x in mints]

        # This mints are not ready spendable, only few of them.
        self.log.info("Got %d confirmed mints" % len(mints_hashes))

        # 4) Start mining again so that spends get confirmed in a block.
        self.log.info("Mining 200 more blocks...")
        self.node.generate(200)
        sleep(2)

        # 5) spend mints
        self.log.info("Spending mints in block %d..." % self.node.getblockcount())
        spends = 0
        for mint in mints_hashes:
            # create a single element list to pass to RPC spendzerocoinmints
            mint_arg = []
            mint_arg.append(mint)
            try:
                self.node.spendzerocoinmints(mint_arg)
                sleep(1)
                spends += 1
            except JSONRPCException as e:
                # Some mints may be unspendable; log and keep going.
                self.log.warning(str(e))
                continue
        sleep(1)
        self.log.info("Successfully spent %d mints" % spends)

        self.log.info("Mining 6 more blocks...")
        self.node.generate(6)
        sleep(2)

        # 6) Collect some prevouts for random txes
        self.log.info("Collecting inputs for txes...")
        utxo_list = self.node.listunspent()
        sleep(1)

        # 7) Create valid forked zPoS blocks and send them
        self.log.info("Creating stake zPoS blocks...")
        err_msgs = self.test_spam("Fork", mints, spending_utxo_list=utxo_list, fZPoS=True, fRandomHeight=True, randomRange=FORK_DEPTH, randomRange2=50, fMustPass=True)
        if not len(err_msgs) == 0:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")

        self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
    # Standard test_framework entry point: parse args and run the test.
    zPoSFakeStakeAccepted().main()
| mit |
tku137/JPKay | docs/source/conf.py | 1 | 10191 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# JPKay documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 10 21:16:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import mock
# Stub out heavy third-party imports so Sphinx autodoc can import the
# package on machines (e.g. readthedocs builders) where these runtime
# dependencies are not installed.
MOCK_MODULES = ['numpy', 'pytz', 'pandas', 'dateutil', 'dateutil.parser']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath(r'..\..'))
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'JPKay'
copyright = '2016, Tony Fischer, Jeremy Perez'
author = 'Tony Fischer, Jeremy Perez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# NOTE(review): this fragment references `master_doc` and `author`, which are
# presumably defined earlier in conf.py (outside this excerpt) — verify.

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'JPKaydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'JPKay.tex', 'JPKay Documentation',
     'Tony Fischer, Jeremy Perez', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'jpkay', 'JPKay Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'JPKay', 'JPKay Documentation',
     author, 'JPKay', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'https://docs.python.org/': None}
# NOTE(review): these inventory URLs use plain http and some hosts have moved
# (docs.scipy.org layout, matplotlib's sourceforge mirror) — consider updating
# to the current https locations; left unchanged here to avoid a behavior change.
intersphinx_mapping = {'python' : ('http://docs.python.org/3', None),
                       'numpy' : ('http://docs.scipy.org/doc/numpy/', None),
                       'scipy' : ('http://docs.scipy.org/doc/scipy/reference/', None),
                       'pandas' : ('http://pandas-docs.github.io/pandas-docs-travis/', None),
                       'matplotlib': ('http://matplotlib.sourceforge.net/', None)}
| mit |
opengeogroep/inasafe | safe/engine/impact_functions_for_testing/categorised_hazard_building_impact.py | 3 | 6853 | from safe.impact_functions.core import (FunctionProvider,
get_hazard_layer,
get_exposure_layer,
get_question)
from safe.common.utilities import (ugettext as tr,
format_int)
from safe.storage.vector import Vector
from safe.common.tables import Table, TableRow
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from safe.common.utilities import OrderedDict
#FIXME: need to normalise all raster data Ole/Kristy
class CategorisedHazardBuildingImpactFunction(FunctionProvider):
    """Impact plugin for categorising hazard impact on building data.

    Counts how many buildings fall into each of the three hazard
    categories (1=low, 2=medium, 3=high) of a normalised hazard raster.

    :author AIFDR
    :rating 2
    :param requires category=='hazard' and \
                    unit=='normalised' and \
                    layertype=='raster'
    :param requires category=='exposure' and \
                    subcategory=='structure' and \
                    layertype=='vector'
    """

    # Attribute written onto each output feature with its impact class.
    target_field = 'ICLASS'

    # Function documentation (strings shown in the InaSAFE UI).
    title = tr('Be affected')
    synopsis = tr('To assess the impacts of categorized hazard in raster '
                  'format on structure/building raster layer.')
    actions = tr('Provide details about how many building would likely need '
                 'to be affected for each category.')
    hazard_input = tr('A hazard raster layer where each cell represents '
                      'the category of the hazard. There should be 3 '
                      'categories: 1, 2, and 3.')
    exposure_input = \
        tr('Vector polygon layer which can be extracted from OSM '
           'where each polygon represents the footprint of a building.')
    output = tr('Map of structure exposed to high category and a table with '
                'number of structure in each category')
    detailed_description = \
        tr('This function will calculate how many buildings will be affected '
           'per each category for all categories in the hazard layer. '
           'Currently there should be 3 categories in the hazard layer. After '
           'that it will show the result and the total of buildings that '
           'will be affected for the hazard given.')
    limitation = tr('The number of categories is three.')
    statistics_type = 'class_count'
    statistics_classes = ['None', 1, 2, 3]
    parameters = OrderedDict([('postprocessors', OrderedDict([
        ('AggregationCategorical', {'on': True})]))
        ])

    def run(self, layers):
        """Compute the impact of a categorised hazard on buildings.

        :param layers: list containing one hazard raster layer and one
            exposure (building footprint) vector layer.
        :returns: a Vector layer of buildings tagged with their impact
            class in ``self.target_field`` plus a tabular impact summary
            in the keywords.
        """
        # Extract data
        H = get_hazard_layer(layers)    # Value
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(),
                                E.get_name(),
                                self)

        # Interpolate hazard level to building locations
        H = assign_hazard_values_to_exposure_data(H, E,
                                                  attribute_name='hazard_lev',
                                                  mode='constant')

        # Extract relevant numerical data
        coordinates = H.get_geometry()
        category = H.get_data()
        N = len(category)

        # List attributes to carry forward to result layer
        #attributes = E.get_attribute_names()

        # Calculate building impact according to guidelines.
        # NOTE(review): the counter names are offset from the category they
        # count — count2 counts category 3 (high), count1 counts category 2
        # (medium) and count0 counts category 1 (low); see the table below.
        count2 = 0
        count1 = 0
        count0 = 0
        building_impact = []
        for i in range(N):
            # Get category value
            val = float(category[i]['hazard_lev'])

            # Classify buildings according to value
            ## if val >= 2.0 / 3:
            ##    affected = 2
            ##    count2 += 1
            ## elif 1.0 / 3 <= val < 2.0 / 3:
            ##    affected = 1
            ##    count1 += 1
            ## else:
            ##    affected = 0
            ##    count0 += 1
            ## FIXME it would be good if the affected were words not numbers
            ## FIXME need to read hazard layer and see category or keyword

            # Any value other than exactly 1, 2 or 3 is recorded as 'None'
            # (and not counted in any category bucket).
            if val == 3:
                affected = 3
                count2 += 1
            elif val == 2:
                affected = 2
                count1 += 1
            elif val == 1:
                affected = 1
                count0 += 1
            else:
                affected = 'None'

            # Collect depth and calculated damage
            result_dict = {self.target_field: affected,
                           'CATEGORY': val}

            # Record result for this feature
            building_impact.append(result_dict)

        # Create impact report
        # Generate impact summary
        table_body = [question,
                      TableRow([tr('Category'), tr('Affected')],
                               header=True),
                      TableRow([tr('High'), format_int(count2)]),
                      TableRow([tr('Medium'), format_int(count1)]),
                      TableRow([tr('Low'), format_int(count0)]),
                      TableRow([tr('All'), format_int(N)])]
        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(tr('Categorised hazard has only 3'
                             ' classes, high, medium and low.'))
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Categorised hazard impact on buildings')

        # FIXME it would be great to do categorized rather than graduated
        # Create style: one colour per impact class (1=green, 2=orange, 3=red).
        style_classes = [dict(label=tr('Low'), min=1, max=1,
                              colour='#1EFC7C', transparency=0, size=1),
                         dict(label=tr('Medium'), min=2, max=2,
                              colour='#FFA500', transparency=0, size=1),
                         dict(label=tr('High'), min=3, max=3,
                              colour='#F31A1C', transparency=0, size=1)]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        name = 'Buildings Affected'
        V = Vector(data=building_impact,
                   projection=E.get_projection(),
                   geometry=coordinates,
                   geometry_type=E.geometry_type,
                   keywords={'impact_summary': impact_summary,
                             'impact_table': impact_table,
                             'map_title': map_title,
                             'target_field': self.target_field,
                             'statistics_type': self.statistics_type,
                             'statistics_classes': self.statistics_classes},
                   name=name,
                   style_info=style_info)
        return V
| gpl-3.0 |
BT-fgarbely/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Fields.py | 384 | 12340 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.functions import *
from lib.error import ErrorDialog
from LoginTest import *
from lib.logreport import *
from lib.rpc import *
# Connection defaults used for every XML-RPC call in this module.
database="report"  # presumably the OpenERP database name — verify against server config
uid = 3  # hard-coded OpenERP user id; NOTE(review): confirm this is intentional
class Fields(unohelper.Base, XJobExecutor ):
    """OpenOffice 'Field Builder' dialog for the OpenERP report designer.

    Lets the user pick a report variable and one of its model fields, then
    inserts (or updates) a DropDown text field in the document whose value
    is an ``[[ variable.field ]]`` expression. This is legacy Python 2 code
    (``<>`` operator, ``filter``/``reduce`` semantics, ``xmlrpclib``).
    """

    def __init__(self, sVariable="", sFields="", sDisplayName="", bFromModify=False):
        """Build the dialog and populate the variable/field lists.

        :param sVariable: pre-selected variable name (when modifying).
        :param sFields: pre-selected field path (when modifying).
        :param sDisplayName: display name shown for an existing field.
        :param bFromModify: True when editing an existing expression.
        """
        LoginTest()
        if not loginstatus and __name__=="package":
            exit(1)
        self.logobj=Logger()
        # Lay out the dialog: variable combo, field list, display-name edit.
        self.win = DBModalDialog(60, 50, 200, 225, "Field Builder")
        self.win.addFixedText("lblVariable", 27, 12, 60, 15, "Variable :")
        self.win.addComboBox("cmbVariable", 180-120-2, 10, 130, 15,True, itemListenerProc=self.cmbVariable_selected)
        self.insVariable = self.win.getControl( "cmbVariable" )
        self.win.addFixedText("lblFields", 10, 32, 60, 15, "Variable Fields :")
        self.win.addComboListBox("lstFields", 180-120-2, 30, 130, 150, False,True,itemListenerProc=self.lstbox_selected)
        self.insField = self.win.getControl( "lstFields" )
        self.win.addFixedText("lblUName", 8, 187, 60, 15, "Displayed name :")
        self.win.addEdit("txtUName", 180-120-2, 185, 130, 15,)
        self.win.addButton('btnOK',-5 ,-5,45,15,'Ok' ,actionListenerProc = self.btnOk_clicked )
        self.win.addButton('btnCancel',-5 - 45 - 5 ,-5,45,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        self.sValue=None
        self.sObj=None
        self.aSectionList=[]
        self.sGDisplayName=sDisplayName
        self.aItemList=[]
        self.aComponentAdd=[]
        self.aObjectList=[]
        self.aListFields=[]
        self.aVariableList=[]
        # Collect existing report items/components from the current document.
        EnumDocument(self.aItemList,self.aComponentAdd)
        desktop=getDesktop()
        doc =desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        # User field 0 holds the server URL, field 3 the model name
        # (set via File -> Properties -> User Defined).
        self.sMyHost= ""
        if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
            self.sMyHost = docinfo.getUserFieldValue(0)
            # Count existing DropDown text fields in the document.
            self.count = 0
            oParEnum = doc.getTextFields().createEnumeration()
            while oParEnum.hasMoreElements():
                oPar = oParEnum.nextElement()
                if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                    self.count += 1
            getList(self.aObjectList, self.sMyHost,self.count)
            cursor = doc.getCurrentController().getViewCursor()
            text = cursor.getText()
            tcur = text.createTextCursorByRange(cursor)
            # Variables named "Objects" are always in scope.
            self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == "Objects", self.aObjectList ) )
            # Add variables that are in scope at the cursor position
            # (document level, enclosing text section, enclosing table).
            for i in range(len(self.aItemList)):
                try:
                    anItem = self.aItemList[i][1]
                    component = self.aComponentAdd[i]
                    if component == "Document":
                        sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
                        self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
                    if tcur.TextSection:
                        getRecersiveSection(tcur.TextSection,self.aSectionList)
                        if component in self.aSectionList:
                            sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
                            self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
                    if tcur.TextTable:
                        if not component == "Document" and component[component.rfind(".")+1:] == tcur.TextTable.Name:
                            VariableScope(tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component)
                except:
                    # Best-effort scan: log and continue with the next item.
                    import traceback,sys
                    info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                    self.logobj.log_write('Fields', LOG_ERROR, info)
            self.bModify=bFromModify
            if self.bModify==True:
                # Pre-select the variable/field of the expression being edited.
                sItem=""
                for anObject in self.aObjectList:
                    if anObject[:anObject.find("(")] == sVariable:
                        sItem = anObject
                        self.insVariable.setText(sItem)
                genTree(
                    sItem[sItem.find("(")+1:sItem.find(")")],
                    self.aListFields,
                    self.insField,
                    self.sMyHost,
                    2,
                    ending_excl=['one2many','many2one','many2many','reference'],
                    recur=['many2one']
                )
                self.sValue= self.win.getListBoxItem("lstFields",self.aListFields.index(sFields))
            # Fill the variable combo, using the model's display name when
            # the server can resolve it.
            for var in self.aVariableList:
                self.model_ids =self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
                fields=['name','model']
                self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
                if self.model_res <> []:
                    self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
                else:
                    self.insVariable.addItem(var ,self.insVariable.getItemCount())
            self.win.doModalDialog("lstFields",self.sValue)
        else:
            ErrorDialog("Please insert user define field Field-1 or Field-4","Just go to File->Properties->User Define \nField-1 E.g. http://localhost:8069 \nOR \nField-4 E.g. account.invoice")
            self.win.endExecute()

    def lstbox_selected(self, oItemEvent):
        """On field selection: preview a sample value in the name box."""
        try:
            desktop=getDesktop()
            doc =desktop.getCurrentComponent()
            docinfo=doc.getDocumentInfo()
            sItem= self.win.getComboBoxText("cmbVariable")
            for var in self.aVariableList:
                if var[:var.find("(")+1]==sItem[:sItem.find("(")+1]:
                    sItem = var
            sMain=self.aListFields[self.win.getListBoxSelectedItemPos("lstFields")]
            # Resolve the field path to its owning model, then read the
            # first record to show an example value.
            sObject=self.getRes(self.sock,sItem[sItem.find("(")+1:-1],sMain[1:])
            ids = self.sock.execute(database, uid, self.password, sObject , 'search', [])
            res = self.sock.execute(database, uid, self.password, sObject , 'read',[ids[0]])
            self.win.setEditText("txtUName",res[0][sMain[sMain.rfind("/")+1:]])
        except:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('Fields', LOG_ERROR, info)
            # NOTE(review): "TTT" looks like a leftover debug placeholder.
            self.win.setEditText("txtUName","TTT")
        if self.bModify:
            self.win.setEditText("txtUName",self.sGDisplayName)

    def getRes(self, sock, sObject, sVar):
        """Follow a '/'-separated field path through many2one relations.

        Returns the model name that owns the last segment of *sVar*,
        recursing into ``relation`` for many2one fields. Returns None
        implicitly when the path cannot be resolved.
        """
        desktop=getDesktop()
        doc =desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        res = sock.execute(database, uid, self.password, sObject , 'fields_get')
        key = res.keys()
        key.sort()
        myval=None
        if not sVar.find("/")==-1:
            myval=sVar[:sVar.find("/")]
        else:
            myval=sVar
        if myval in key:
            if (res[myval]['type'] in ['many2one']):
                sObject = res[myval]['relation']
                return self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
            else:
                return sObject

    def cmbVariable_selected(self, oItemEvent):
        """On variable selection: rebuild the field list for that model."""
        if self.count > 0 :
            try:
                desktop=getDesktop()
                doc =desktop.getCurrentComponent()
                docinfo=doc.getDocumentInfo()
                self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
                self.aListFields=[]
                tempItem = self.win.getComboBoxText("cmbVariable")
                for var in self.aVariableList:
                    if var[:var.find("(")] == tempItem[:tempItem.find("(")]:
                        sItem=var
                genTree(
                    sItem[sItem.find("(")+1:sItem.find(")")],
                    self.aListFields,
                    self.insField,
                    self.sMyHost,
                    2,
                    ending_excl=['one2many','many2one','many2many','reference'],
                    recur=['many2one']
                )
            except:
                import traceback,sys
                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                self.logobj.log_write('Fields', LOG_ERROR, info)

    def btnOk_clicked(self, oActionEvent):
        """Insert a new expression field, or update the one being edited."""
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        cursor = doc.getCurrentController().getViewCursor()
        # Only the last selected position is kept (single-select in effect).
        for i in self.win.getListBoxSelectedItemsPos("lstFields"):
            itemSelected = self.aListFields[i]
            itemSelectedPos = i
        txtUName=self.win.getEditText("txtUName")
        sKey=u""+txtUName
        if itemSelected != "" and txtUName != "" and self.bModify==True :
            # Modify path: rewrite the existing DropDown field in place.
            txtUName=self.sGDisplayName
            sKey=u""+txtUName
            txtUName=self.sGDisplayName
            oCurObj=cursor.TextField
            sObjName=self.insVariable.getText()
            sObjName=sObjName[:sObjName.find("(")]
            sValue=u"[[ " + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + " ]]"
            oCurObj.Items = (sKey,sValue)
            oCurObj.update()
            self.win.endExecute()
        elif itemSelected != "" and txtUName != "" :
            # Insert path: create a new DropDown field at the cursor
            # (inside the current table cell if the cursor is in a table).
            oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
            sObjName=self.win.getComboBoxText("cmbVariable")
            sObjName=sObjName[:sObjName.find("(")]
            widget = ( cursor.TextTable and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text )
            sValue = u"[[ " + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + " ]]"
            oInputList.Items = (sKey,sValue)
            widget.insertTextContent(cursor,oInputList,False)
            self.win.endExecute()
        else:
            ErrorDialog("Please fill appropriate data in Name field \nor select particular value from the list of fields.")

    def btnCancel_clicked(self, oActionEvent):
        """Close the dialog without touching the document."""
        self.win.endExecute()
# Entry points: run the dialog directly when executed as a script, or
# register it as a UNO component when loaded as an OpenOffice package.
if __name__<>"package" and __name__=="__main__":
    Fields()
elif __name__=="package":
    g_ImplementationHelper.addImplementation( Fields, "org.openoffice.openerp.report.fields", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sprockets/sprockets.mixins.avro | setup.py | 1 | 2094 | #!/usr/bin/env python
import codecs
import setuptools
import sprockets.mixins.avro
def read_requirements_file(req_name):
    """Parse a pip-style requirements file into a list of requirement strings.

    Inline ``#`` comments and comment-only lines are stripped; blank lines
    are skipped. Returns an empty list when the file does not exist (the
    file is optional at build time).

    :param str req_name: path to the requirements file.
    :rtype: list[str]
    """
    requirements = []
    try:
        with codecs.open(req_name, encoding='utf-8') as req_file:
            for req_line in req_file:
                # Drop anything after an inline comment marker.
                if '#' in req_line:
                    req_line = req_line[0:req_line.find('#')]
                # BUG FIX: strip *before* the truthiness test. The original
                # tested the raw line, so a blank line ("\n") was truthy and
                # an empty string was appended to the requirements list.
                req_line = req_line.strip()
                if req_line:
                    requirements.append(req_line)
    except IOError:
        # Missing requirements files are treated as "no requirements".
        pass
    return requirements
# Requirement lists are read from the conventional trio of files; each is
# optional (read_requirements_file returns [] for a missing file).
install_requires = read_requirements_file('requirements.txt')
setup_requires = read_requirements_file('setup-requirements.txt')
tests_require = read_requirements_file('test-requirements.txt')

setuptools.setup(
    name='sprockets.mixins.avro',
    # Single-sourced version from the package itself.
    version=sprockets.mixins.avro.__version__,
    description='Mixins that make working with Avro data easier.',
    long_description=codecs.open('README.rst', encoding='utf-8').read(),
    url='https://github.com/sprockets/sprockets.mixins.avro.git',
    author='Dave Shawley',
    author_email='[email protected]',
    license='BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    # 'sprockets' and 'sprockets.mixins' are shared namespace packages.
    packages=['sprockets', 'sprockets.mixins'],
    package_data={'': ['LICENSE', 'README.md']},
    include_package_data=True,
    namespace_packages=['sprockets', 'sprockets.mixins'],
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    test_suite='nose.collector',
    zip_safe=False)
| bsd-3-clause |
BaconPancakes/valor | lib/bs4/tests/test_tree.py | 10 | 78030 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
from pdb import set_trace
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
PY3K,
CData,
Comment,
Declaration,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
    """Shared assertion helpers for the tree-traversal test cases."""

    def assertSelects(self, tags, should_match):
        """Make sure that the given tags have the correct text.

        This is used in tests that define a bunch of tags, each
        containing a single string, and then select certain strings by
        some mechanism.
        """
        strings = [element.string for element in tags]
        self.assertEqual(strings, should_match)

    def assertSelectsIDs(self, tags, should_match):
        """Make sure that the given tags have the correct IDs.

        This is used in tests that define a bunch of tags, each
        containing a single string, and then select certain strings by
        some mechanism.
        """
        ids = [element['id'] for element in tags]
        self.assertEqual(ids, should_match)
class TestFind(TreeTest):
    """Basic tests of the find() method.

    find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
    """

    def test_find_tag(self):
        soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
        self.assertEqual(soup.find("b").string, "2")

    def test_unicode_text_find(self):
        # Non-ASCII text can be matched by the string= argument.
        soup = self.soup('<h1>Räksmörgås</h1>')
        self.assertEqual(soup.find(string='Räksmörgås'), 'Räksmörgås')

    def test_unicode_attribute_find(self):
        # Non-ASCII attribute values can be matched by keyword arguments.
        soup = self.soup('<h1 id="Räksmörgås">here it is</h1>')
        str(soup)
        self.assertEqual("here it is", soup.find(id='Räksmörgås').text)

    def test_find_everything(self):
        """Test an optimization that finds all tags."""
        soup = self.soup("<a>foo</a><b>bar</b>")
        self.assertEqual(2, len(soup.find_all()))

    def test_find_everything_with_name(self):
        """Test an optimization that finds all tags with a given name."""
        soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>")
        self.assertEqual(2, len(soup.find_all('a')))
class TestFindAll(TreeTest):
    """Basic tests of the find_all() method."""

    def test_find_all_text_nodes(self):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match. `string` and the older `text` argument are equivalent.
        self.assertEqual(soup.find_all(string="bar"), ["bar"])
        self.assertEqual(soup.find_all(text="bar"), ["bar"])
        # Match any of a number of strings.
        self.assertEqual(
            soup.find_all(text=["Foo", "bar"]), ["Foo", "bar"])
        # Match a regular expression.
        self.assertEqual(soup.find_all(text=re.compile('.*')),
                         ["Foo", "bar", '\xbb'])
        # Match anything.
        self.assertEqual(soup.find_all(text=True),
                         ["Foo", "bar", '\xbb'])

    def test_find_all_limit(self):
        """You can limit the number of items returned by find_all."""
        soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
        self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
        self.assertSelects(soup.find_all('a', limit=1), ["1"])
        self.assertSelects(
            soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])

        # A limit of 0 means no limit.
        self.assertSelects(
            soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])

    def test_calling_a_tag_is_calling_findall(self):
        # tag(...) is shorthand for tag.find_all(...).
        soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
        self.assertSelects(soup('a', limit=1), ["1"])
        self.assertSelects(soup.b(id="foo"), ["3"])

    def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
        soup = self.soup("<a></a>")
        # Create a self-referential list.
        l = []
        l.append(l)

        # Without special code in _normalize_search_value, this would cause infinite
        # recursion.
        self.assertEqual([], soup.find_all(l))

    def test_find_all_resultset(self):
        """All find_all calls return a ResultSet"""
        soup = self.soup("<a></a>")
        result = soup.find_all("a")
        self.assertTrue(hasattr(result, "source"))

        result = soup.find_all(True)
        self.assertTrue(hasattr(result, "source"))

        result = soup.find_all(text="foo")
        self.assertTrue(hasattr(result, "source"))
class TestFindAllBasicNamespaces(TreeTest):
    """Tests that namespaced tag and attribute names work in searches."""

    def test_find_by_namespaced_name(self):
        soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
        self.assertEqual("4", soup.find("mathml:msqrt").string)
        self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name)
class TestFindAllByName(TreeTest):
    """Test ways of finding tags by tag name."""

    def setUp(self):
        super(TreeTest, self).setUp()
        # Fixture: two top-level tags plus one <a> nested inside <c>.
        self.tree =  self.soup("""<a>First tag.</a>
<b>Second tag.</b>
<c>Third <a>Nested tag.</a> tag.</c>""")

    def test_find_all_by_tag_name(self):
        # Find all the <a> tags.
        self.assertSelects(
            self.tree.find_all('a'), ['First tag.', 'Nested tag.'])

    def test_find_all_by_name_and_text(self):
        self.assertSelects(
            self.tree.find_all('a', text='First tag.'), ['First tag.'])

        self.assertSelects(
            self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])

        self.assertSelects(
            self.tree.find_all('a', text=re.compile("tag")),
            ['First tag.', 'Nested tag.'])

    def test_find_all_on_non_root_element(self):
        # You can call find_all on any node, not just the root.
        self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])

    def test_calling_element_invokes_find_all(self):
        self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])

    def test_find_all_by_tag_strainer(self):
        self.assertSelects(
            self.tree.find_all(SoupStrainer('a')),
            ['First tag.', 'Nested tag.'])

    def test_find_all_by_tag_names(self):
        self.assertSelects(
            self.tree.find_all(['a', 'b']),
            ['First tag.', 'Second tag.', 'Nested tag.'])

    def test_find_all_by_tag_dict(self):
        self.assertSelects(
            self.tree.find_all({'a' : True, 'b' : True}),
            ['First tag.', 'Second tag.', 'Nested tag.'])

    def test_find_all_by_tag_re(self):
        self.assertSelects(
            self.tree.find_all(re.compile('^[ab]$')),
            ['First tag.', 'Second tag.', 'Nested tag.'])

    def test_find_all_with_tags_matching_method(self):
        # You can define an oracle method that determines whether
        # a tag matches the search.
        def id_matches_name(tag):
            return tag.name == tag.get('id')

        tree = self.soup("""<a id="a">Match 1.</a>
<a id="1">Does not match.</a>
<b id="b">Match 2.</a>""")

        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])

    def test_find_with_multi_valued_attribute(self):
        soup = self.soup(
            "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>"
        )
        r1 = soup.find('div', 'a d');
        r2 = soup.find('div', re.compile(r'a d'));
        r3, r4 = soup.find_all('div', ['a b', 'a d']);
        self.assertEqual('3', r1.string)
        self.assertEqual('3', r2.string)
        self.assertEqual('1', r3.string)
        self.assertEqual('3', r4.string)
class TestFindAllByAttribute(TreeTest):
def test_find_all_by_attribute_name(self):
# You can pass in keyword arguments to find_all to search by
# attribute.
tree = self.soup("""
<a id="first">Matching a.</a>
<a id="second">
Non-matching <b id="first">Matching b.</b>a.
</a>""")
self.assertSelects(tree.find_all(id='first'),
["Matching a.", "Matching b."])
def test_find_all_by_utf8_attribute_value(self):
peace = "םולש".encode("utf8")
data = '<a title="םולש"></a>'.encode("utf8")
soup = self.soup(data)
self.assertEqual([soup.a], soup.find_all(title=peace))
self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))
def test_find_all_by_attribute_dict(self):
# You can pass in a dictionary as the argument 'attrs'. This
# lets you search for attributes like 'name' (a fixed argument
# to find_all) and 'class' (a reserved word in Python.)
tree = self.soup("""
<a name="name1" class="class1">Name match.</a>
<a name="name2" class="class2">Class match.</a>
<a name="name3" class="class3">Non-match.</a>
<name1>A tag called 'name1'.</name1>
""")
# This doesn't do what you want.
self.assertSelects(tree.find_all(name='name1'),
["A tag called 'name1'."])
# This does what you want.
self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
["Name match."])
self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
["Class match."])
def test_find_all_by_class(self):
tree = self.soup("""
<a class="1">Class 1.</a>
<a class="2">Class 2.</a>
<b class="1">Class 1.</b>
<c class="3 4">Class 3 and 4.</c>
""")
# Passing in the class_ keyword argument will search against
# the 'class' attribute.
self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.'])
self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.'])
# Passing in a string to 'attrs' will also search the CSS class.
self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
def test_find_by_class_when_multiple_classes_present(self):
tree = self.soup("<gar class='foo bar'>Found it</gar>")
f = tree.find_all("gar", class_=re.compile("o"))
self.assertSelects(f, ["Found it"])
f = tree.find_all("gar", class_=re.compile("a"))
self.assertSelects(f, ["Found it"])
# If the search fails to match the individual strings "foo" and "bar",
# it will be tried against the combined string "foo bar".
f = tree.find_all("gar", class_=re.compile("o b"))
self.assertSelects(f, ["Found it"])
def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
soup = self.soup("<a class='bar'>Found it</a>")
self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
def big_attribute_value(value):
return len(value) > 3
self.assertSelects(soup.find_all("a", big_attribute_value), [])
def small_attribute_value(value):
return len(value) <= 3
self.assertSelects(
soup.find_all("a", small_attribute_value), ["Found it"])
def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
a, a2 = soup.find_all("a")
self.assertEqual([a, a2], soup.find_all("a", "foo"))
self.assertEqual([a], soup.find_all("a", "bar"))
# If you specify the class as a string that contains a
# space, only that specific value will be found.
self.assertEqual([a], soup.find_all("a", class_="foo bar"))
self.assertEqual([a], soup.find_all("a", "foo bar"))
self.assertEqual([], soup.find_all("a", "bar foo"))
def test_find_all_by_attribute_soupstrainer(self):
tree = self.soup("""
<a id="first">Match.</a>
<a id="second">Non-match.</a>""")
strainer = SoupStrainer(attrs={'id' : 'first'})
self.assertSelects(tree.find_all(strainer), ['Match.'])
def test_find_all_with_missing_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that do not have that attribute set.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
    def test_find_all_with_defined_attribute(self):
        # You can pass in True as the value of an attribute to find_all.
        # This will match tags that have that attribute set to any value,
        # including the empty string.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(
            tree.find_all(id=True), ["ID present.", "ID is empty."])
    def test_find_all_with_numeric_attribute(self):
        # If you search for a number, it's treated as a string.
        tree = self.soup("""<a id=1>Unquoted attribute.</a>
                            <a id="1">Quoted attribute.</a>""")
        # Both the int 1 and the string "1" find both tags.
        expected = ["Unquoted attribute.", "Quoted attribute."]
        self.assertSelects(tree.find_all(id=1), expected)
        self.assertSelects(tree.find_all(id="1"), expected)
    def test_find_all_with_list_attribute_values(self):
        # You can pass a list of attribute values instead of just one,
        # and you'll get tags that match any of the values.
        tree = self.soup("""<a id="1">1</a>
                            <a id="2">2</a>
                            <a id="3">3</a>
                            <a>No ID.</a>""")
        # "4" matches nothing, and the tag without an id is never
        # a candidate.
        self.assertSelects(tree.find_all(id=["1", "3", "4"]),
                           ["1", "3"])
    def test_find_all_with_regular_expression_attribute_value(self):
        # You can pass a regular expression as an attribute value, and
        # you'll get tags whose values for that attribute match the
        # regular expression.
        tree = self.soup("""<a id="a">One a.</a>
                            <a id="aa">Two as.</a>
                            <a id="ab">Mixed as and bs.</a>
                            <a id="b">One b.</a>
                            <a>No ID.</a>""")
        # ^a+$ matches "a" and "aa" but not "ab" or "b";
        # tags with no id at all are never candidates.
        self.assertSelects(tree.find_all(id=re.compile("^a+$")),
                           ["One a.", "Two as."])
def test_find_by_name_and_containing_string(self):
soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
a = soup.a
self.assertEqual([a], soup.find_all("a", text="foo"))
self.assertEqual([], soup.find_all("a", text="bar"))
self.assertEqual([], soup.find_all("a", text="bar"))
    def test_find_by_name_and_containing_string_when_string_is_buried(self):
        # text= matches even when the string is nested deep inside the
        # tag being matched, so both <a> tags qualify here.
        soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
        self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
    def test_find_by_attribute_and_containing_string(self):
        # Attribute filters and text= are combined with AND semantics.
        soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
        a = soup.a
        self.assertEqual([a], soup.find_all(id=2, text="foo"))
        # id=1 exists and text="bar" exists nowhere; the combination
        # matches nothing.
        self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
    """Test Tag.index"""
    def test_index(self):
        """Tag.index(child) returns the child's position in .contents."""
        tree = self.soup("""<div>
                            <a>Identical</a>
                            <b>Not identical</b>
                            <a>Identical</a>
                            <c><d>Identical with child</d></c>
                            <b>Also not identical</b>
                            <c><d>Identical with child</d></c>
                            </div>""")
        div = tree.div
        # index() must distinguish equal-looking children by identity,
        # not equality: each occupies its own distinct position.
        for i, element in enumerate(div.contents):
            self.assertEqual(i, div.index(element))
        # Asking for something that is not a child raises ValueError.
        self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
    """Test navigation and searching through an element's parents."""
    def setUp(self):
        super(TestParentOperations, self).setUp()
        # Deeply nested <ul> tags; the innermost <b> is the starting
        # point for every parent-navigation test.
        self.tree = self.soup('''<ul id="empty"></ul>
                                 <ul id="top">
                                  <ul id="middle">
                                   <ul id="bottom">
                                    <b>Start here</b>
                                   </ul>
                                  </ul>''')
        self.start = self.tree.b
    def test_parent(self):
        # .parent walks one level up per access.
        self.assertEqual(self.start.parent['id'], 'bottom')
        self.assertEqual(self.start.parent.parent['id'], 'middle')
        self.assertEqual(self.start.parent.parent.parent['id'], 'top')
    def test_parent_of_top_tag_is_soup_object(self):
        top_tag = self.tree.contents[0]
        self.assertEqual(top_tag.parent, self.tree)
    def test_soup_object_has_no_parent(self):
        self.assertEqual(None, self.tree.parent)
    def test_find_parents(self):
        # Parents are returned innermost-first; the sibling
        # <ul id="empty"> is not an ancestor and never appears.
        self.assertSelectsIDs(
            self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
        self.assertSelectsIDs(
            self.start.find_parents('ul', id="middle"), ['middle'])
    def test_find_parent(self):
        # find_parent returns the nearest matching ancestor.
        self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
        self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top')
    def test_parent_of_text_element(self):
        # NavigableStrings have parents too.
        text = self.tree.find(text="Start here")
        self.assertEqual(text.parent.name, 'b')
    def test_text_element_find_parent(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.find_parent('ul')['id'], 'bottom')
    def test_parent_generator(self):
        # The .parents generator yields every ancestor; filter to the
        # ones carrying an id attribute.
        parents = [parent['id'] for parent in self.start.parents
                   if parent is not None and 'id' in parent.attrs]
        self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
    """Shared fixture for next/previous-element navigation tests."""
    def setUp(self):
        # FIX: this used to call super(TreeTest, self).setUp(), which
        # skips TreeTest itself in the MRO and runs its *parent's*
        # setUp instead. Name this class, as every sibling class does.
        super(ProximityTest, self).setUp()
        self.tree = self.soup(
            '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
    """Tests for next_element / find_next / find_all_next navigation."""
    def setUp(self):
        super(TestNextOperations, self).setUp()
        # The first <b> tag (id="1").
        self.start = self.tree.b
    def test_next(self):
        # A tag's next element is its first child string.
        self.assertEqual(self.start.next_element, "One")
        self.assertEqual(self.start.next_element.next_element['id'], "2")
    def test_next_of_last_item_is_none(self):
        last = self.tree.find(text="Three")
        self.assertEqual(last.next_element, None)
    def test_next_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        self.assertEqual(self.tree.next_element, None)
    def test_find_all_next(self):
        # FIX: removed a stray `self.start.find_all_next(id=3)` call
        # whose result was discarded (leftover debugging statement).
        self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
        self.assertSelects(self.start.find_all_next(id=3), ["Three"])
    def test_find_next(self):
        self.assertEqual(self.start.find_next('b')['id'], '2')
        self.assertEqual(self.start.find_next(text="Three"), "Three")
    def test_find_next_for_text_element(self):
        # Navigation works from a NavigableString as well as a Tag.
        text = self.tree.find(text="One")
        self.assertEqual(text.find_next("b").string, "Two")
        self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
    def test_next_generator(self):
        start = self.tree.find(text="Two")
        successors = [node for node in start.next_elements]
        # There are two successors: the final <b> tag and its text contents.
        tag, contents = successors
        self.assertEqual(tag['id'], '3')
        self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
    """Tests for previous_element / find_previous navigation."""
    def setUp(self):
        super(TestPreviousOperations, self).setUp()
        # The last string in the document.
        self.end = self.tree.find(text="Three")
    def test_previous(self):
        # A string's previous element is its containing tag.
        self.assertEqual(self.end.previous_element['id'], "3")
        self.assertEqual(self.end.previous_element.previous_element, "Two")
    def test_previous_of_first_item_is_none(self):
        first = self.tree.find('html')
        self.assertEqual(first.previous_element, None)
    def test_previous_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        # XXX This is broken!
        #self.assertEqual(self.tree.previous_element, None)
        pass
    def test_find_all_previous(self):
        # The <b> tag containing the "Three" node is the predecessor
        # of the "Three" node itself, which is why "Three" shows up
        # here.
        self.assertSelects(
            self.end.find_all_previous('b'), ["Three", "Two", "One"])
        self.assertSelects(self.end.find_all_previous(id=1), ["One"])
    def test_find_previous(self):
        self.assertEqual(self.end.find_previous('b')['id'], '3')
        self.assertEqual(self.end.find_previous(text="One"), "One")
    def test_find_previous_for_text_element(self):
        # Navigation works from a NavigableString as well as a Tag.
        text = self.tree.find(text="Three")
        self.assertEqual(text.find_previous("b").string, "Three")
        self.assertSelects(
            text.find_all_previous("b"), ["Three", "Two", "One"])
    def test_previous_generator(self):
        start = self.tree.find(text="One")
        predecessors = [node for node in start.previous_elements]
        # There are four predecessors: the <b> tag containing "One"
        # the <body> tag, the <head> tag, and the <html> tag.
        b, body, head, html = predecessors
        self.assertEqual(b['id'], '1')
        self.assertEqual(body.name, "body")
        self.assertEqual(head.name, "head")
        self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
    """Shared fixture for next/previous-sibling navigation tests."""
    def setUp(self):
        super(SiblingTest, self).setUp()
        markup = '''<html>
                    <span id="1">
                     <span id="1.1"></span>
                    </span>
                    <span id="2">
                     <span id="2.1"></span>
                    </span>
                    <span id="3">
                     <span id="3.1"></span>
                    </span>
                    <span id="4"></span>
                    </html>'''
        # All that whitespace looks good but makes the tests more
        # difficult. Get rid of it.
        # FIX: use a raw string for the pattern -- "\n\s*" contains the
        # invalid string escape \s, which is a DeprecationWarning (and,
        # from Python 3.12, a SyntaxWarning). The regex is unchanged.
        markup = re.compile(r"\n\s*").sub("", markup)
        self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
    """Tests for next_sibling / find_next_sibling(s) navigation."""
    def setUp(self):
        super(TestNextSibling, self).setUp()
        self.start = self.tree.find(id="1")
    def test_next_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.next_sibling, None)
    def test_next_sibling(self):
        self.assertEqual(self.start.next_sibling['id'], '2')
        self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
        # Note the difference between next_sibling and next_element.
        self.assertEqual(self.start.next_element['id'], '1.1')
    def test_next_sibling_may_not_exist(self):
        # The <html> tag, an only child, and a last child all lack
        # a next sibling.
        self.assertEqual(self.tree.html.next_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.next_sibling, None)
        last_span = self.tree.find(id="4")
        self.assertEqual(last_span.next_sibling, None)
    def test_find_next_sibling(self):
        self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
    def test_next_siblings(self):
        self.assertSelectsIDs(self.start.find_next_siblings("span"),
                              ['2', '3', '4'])
        self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
    def test_next_sibling_for_text_element(self):
        # Sibling navigation works from a NavigableString too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="Foo")
        self.assertEqual(start.next_sibling.name, 'b')
        self.assertEqual(start.next_sibling.next_sibling, 'baz')
        self.assertSelects(start.find_next_siblings('b'), ['bar'])
        self.assertEqual(start.find_next_sibling(text="baz"), "baz")
        self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
    """Tests for previous_sibling / find_previous_sibling(s) navigation."""
    def setUp(self):
        super(TestPreviousSibling, self).setUp()
        self.end = self.tree.find(id="4")
    def test_previous_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.previous_sibling, None)
    def test_previous_sibling(self):
        self.assertEqual(self.end.previous_sibling['id'], '3')
        self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
        # Note the difference between previous_sibling and previous_element.
        self.assertEqual(self.end.previous_element['id'], '3.1')
    def test_previous_sibling_may_not_exist(self):
        # The <html> tag, an only child, and a first child all lack
        # a previous sibling.
        self.assertEqual(self.tree.html.previous_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.previous_sibling, None)
        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)
    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
    def test_previous_siblings(self):
        # Previous siblings come back nearest-first.
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])
        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
    def test_previous_sibling_for_text_element(self):
        # Sibling navigation works from a NavigableString too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        # A freshly created tag is not attached to any tree.
        self.assertEqual(None, new_tag.parent)
    def test_tag_inherits_self_closing_rules_from_builder(self):
        # NOTE: the XML half of this test is silently skipped when
        # lxml is not installed.
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")
            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())
        html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")
        # The HTML builder users HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        self.assertEqual(b"<br/>", html_br.encode())
        self.assertEqual(b"<p></p>", html_p.encode())
    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, NavigableString))
    def test_new_string_can_create_navigablestring_subclass(self):
        # An optional second argument picks the NavigableString subclass.
        soup = self.soup("")
        s = soup.new_string("foo", Comment)
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, Comment))
class TestTreeModification(SoupTest):
    """Tests for operations that modify the parse tree in place:
    attribute changes, insert/append, replace_with, unwrap, wrap,
    extract, clear, and .string assignment."""
    def test_attribute_modification(self):
        # Attributes can be set, deleted, and added via item access.
        soup = self.soup('<a id="1"></a>')
        soup.a['id'] = 2
        self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
        del(soup.a['id'])
        self.assertEqual(soup.decode(), self.document_for('<a></a>'))
        soup.a['id2'] = 'foo'
        self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
    def test_new_tag_creation(self):
        # Tags built directly with the Tag constructor can be inserted
        # into an existing tree.
        builder = builder_registry.lookup('html')()
        soup = self.soup("<body></body>", builder=builder)
        a = Tag(soup, builder, 'a')
        ol = Tag(soup, builder, 'ol')
        a['href'] = 'http://foo.com/'
        soup.body.insert(0, a)
        soup.body.insert(1, ol)
        self.assertEqual(
            soup.body.encode(),
            b'<body><a href="http://foo.com/"></a><ol></ol></body>')
    def test_append_to_contents_moves_tag(self):
        # Appending a tag that's already in the tree moves it rather
        # than copying it.
        doc = """<p id="1">Don't leave me <b>here</b>.</p>
        <p id="2">Don\'t leave!</p>"""
        soup = self.soup(doc)
        second_para = soup.find(id='2')
        bold = soup.b
        # Move the <b> tag to the end of the second paragraph.
        soup.find(id='2').append(soup.b)
        # The <b> tag is now a child of the second paragraph.
        self.assertEqual(bold.parent, second_para)
        self.assertEqual(
            soup.decode(), self.document_for(
                '<p id="1">Don\'t leave me .</p>\n'
                '<p id="2">Don\'t leave!<b>here</b></p>'))
    def test_replace_with_returns_thing_that_was_replaced(self):
        text = "<a></a><b><c></c></b>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.replace_with(soup.c)
        self.assertEqual(a, new_a)
    def test_unwrap_returns_thing_that_was_replaced(self):
        text = "<a><b></b><c></c></a>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.unwrap()
        self.assertEqual(a, new_a)
    def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self):
        # A tag detached from the tree cannot be unwrapped or replaced.
        soup = self.soup("<a><b>Foo</b></a><c>Bar</c>")
        a = soup.a
        a.extract()
        self.assertEqual(None, a.parent)
        self.assertRaises(ValueError, a.unwrap)
        self.assertRaises(ValueError, a.replace_with, soup.c)
    def test_replace_tag_with_itself(self):
        # Replacing a tag with itself is a no-op.
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = self.soup(text)
        c = soup.c
        soup.c.replace_with(c)
        self.assertEqual(soup.decode(), self.document_for(text))
    def test_replace_tag_with_its_parent_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.b.replace_with, soup.a)
    def test_insert_tag_into_itself_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
    def test_replace_with_maintains_next_element_throughout(self):
        soup = self.soup('<p><a>one</a><b>three</b></p>')
        a = soup.a
        b = a.contents[0]
        # Make it so the <a> tag has two text children.
        a.insert(1, "two")
        # Now replace each one with the empty string.
        left, right = a.contents
        left.replaceWith('')
        right.replaceWith('')
        # The <b> tag is still connected to the tree.
        self.assertEqual("three", soup.b.string)
    def test_replace_final_node(self):
        # Replacing the very last node keeps the next/previous chain
        # consistent.
        soup = self.soup("<b>Argh!</b>")
        soup.find(text="Argh!").replace_with("Hooray!")
        new_text = soup.find(text="Hooray!")
        b = soup.b
        self.assertEqual(new_text.previous_element, b)
        self.assertEqual(new_text.parent, b)
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.next_element, None)
    def test_consecutive_text_nodes(self):
        # A builder should never create two consecutive text nodes,
        # but if you insert one next to another, Beautiful Soup will
        # handle it correctly.
        soup = self.soup("<a><b>Argh!</b><c></c></a>")
        soup.b.insert(1, "Hooray!")
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Argh!Hooray!</b><c></c></a>"))
        new_text = soup.find(text="Hooray!")
        self.assertEqual(new_text.previous_element, "Argh!")
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.previous_sibling, "Argh!")
        self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
        self.assertEqual(new_text.next_sibling, None)
        self.assertEqual(new_text.next_element, soup.c)
    def test_insert_string(self):
        soup = self.soup("<a></a>")
        soup.a.insert(0, "bar")
        soup.a.insert(0, "foo")
        # The string were added to the tag.
        self.assertEqual(["foo", "bar"], soup.a.contents)
        # And they were converted to NavigableStrings.
        self.assertEqual(soup.a.contents[0].next_element, "bar")
    def test_insert_tag(self):
        builder = self.default_builder
        soup = self.soup(
            "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
        magic_tag = Tag(soup, builder, 'magictag')
        magic_tag.insert(0, "the")
        soup.a.insert(1, magic_tag)
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
        # Make sure all the relationships are hooked up correctly.
        b_tag = soup.b
        self.assertEqual(b_tag.next_sibling, magic_tag)
        self.assertEqual(magic_tag.previous_sibling, b_tag)
        find = b_tag.find(text="Find")
        self.assertEqual(find.next_element, magic_tag)
        self.assertEqual(magic_tag.previous_element, find)
        c_tag = soup.c
        self.assertEqual(magic_tag.next_sibling, c_tag)
        self.assertEqual(c_tag.previous_sibling, magic_tag)
        the = magic_tag.find(text="the")
        self.assertEqual(the.parent, magic_tag)
        self.assertEqual(the.next_element, c_tag)
        self.assertEqual(c_tag.previous_element, the)
    def test_append_child_thats_already_at_the_end(self):
        # Appending the last child again leaves the tree unchanged.
        data = "<a><b></b></a>"
        soup = self.soup(data)
        soup.a.append(soup.b)
        self.assertEqual(data, soup.decode())
    def test_move_tag_to_beginning_of_parent(self):
        data = "<a><b></b><c></c><d></d></a>"
        soup = self.soup(data)
        soup.a.insert(0, soup.d)
        self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())
    def test_insert_works_on_empty_element_tag(self):
        # This is a little strange, since most HTML parsers don't allow
        # markup like this to come through. But in general, we don't
        # know what the parser would or wouldn't have allowed, so
        # I'm letting this succeed for now.
        soup = self.soup("<br/>")
        soup.br.insert(1, "Contents")
        self.assertEqual(str(soup.br), "<br>Contents</br>")
    def test_insert_before(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_before("BAZ")
        soup.a.insert_before("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
        # Inserting an existing tag moves it.
        soup.a.insert_before(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
    def test_insert_after(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_after("BAZ")
        soup.a.insert_after("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
        # Inserting an existing tag moves it.
        soup.b.insert_after(soup.a)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
    def test_insert_after_raises_exception_if_after_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        # A detached string, the soup object itself, and a tag relative
        # to itself all have no meaningful "after" position.
        self.assertRaises(ValueError, string.insert_after, tag)
        self.assertRaises(NotImplementedError, soup.insert_after, tag)
        self.assertRaises(ValueError, tag.insert_after, tag)
    def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        self.assertRaises(ValueError, string.insert_before, tag)
        self.assertRaises(NotImplementedError, soup.insert_before, tag)
        self.assertRaises(ValueError, tag.insert_before, tag)
    def test_replace_with(self):
        soup = self.soup(
            "<p>There's <b>no</b> business like <b>show</b> business</p>")
        no, show = soup.find_all('b')
        show.replace_with(no)
        self.assertEqual(
            soup.decode(),
            self.document_for(
                "<p>There's  business like <b>no</b> business</p>"))
        self.assertEqual(show.parent, None)
        self.assertEqual(no.parent, soup.p)
        self.assertEqual(no.next_element, "no")
        self.assertEqual(no.next_sibling, " business")
    def test_replace_first_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.b.replace_with(soup.c)
        self.assertEqual("<a><c></c></a>", soup.decode())
    def test_replace_last_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.c.replace_with(soup.b)
        self.assertEqual("<a><b></b></a>", soup.decode())
    def test_nested_tag_replace_with(self):
        soup = self.soup(
            """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
        # Replace the entire <b> tag and its contents ("reserve the
        # right") with the <f> tag ("refuse").
        remove_tag = soup.b
        move_tag = soup.f
        remove_tag.replace_with(move_tag)
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
        # The <b> tag is now an orphan.
        self.assertEqual(remove_tag.parent, None)
        self.assertEqual(remove_tag.find(text="right").next_element, None)
        self.assertEqual(remove_tag.previous_element, None)
        self.assertEqual(remove_tag.next_sibling, None)
        self.assertEqual(remove_tag.previous_sibling, None)
        # The <f> tag is now connected to the <a> tag.
        self.assertEqual(move_tag.parent, soup.a)
        self.assertEqual(move_tag.previous_element, "We")
        self.assertEqual(move_tag.next_element.next_element, soup.e)
        self.assertEqual(move_tag.next_sibling, None)
        # The gap where the <f> tag used to be has been mended, and
        # the word "to" is now connected to the <g> tag.
        to_text = soup.find(text="to")
        g_tag = soup.g
        self.assertEqual(to_text.next_element, g_tag)
        self.assertEqual(to_text.next_sibling, g_tag)
        self.assertEqual(g_tag.previous_element, to_text)
        self.assertEqual(g_tag.previous_sibling, to_text)
    def test_unwrap(self):
        # unwrap() replaces a tag with its own contents.
        tree = self.soup("""
            <p>Unneeded <em>formatting</em> is unneeded</p>
            """)
        tree.em.unwrap()
        self.assertEqual(tree.em, None)
        self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
    def test_wrap(self):
        # wrap() encloses an element in a new tag, and returns that tag.
        soup = self.soup("I wish I was bold.")
        value = soup.string.wrap(soup.new_tag("b"))
        self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
    def test_wrap_extracts_tag_from_elsewhere(self):
        # Wrapping in a tag that's already in the tree moves that tag.
        soup = self.soup("<b></b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
    def test_wrap_puts_new_contents_at_the_end(self):
        soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(2, len(soup.b.contents))
        self.assertEqual(
            soup.decode(), self.document_for(
                "<b>I like being bold.I wish I was bold.</b>"))
    def test_extract(self):
        soup = self.soup(
            '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
        self.assertEqual(len(soup.body.contents), 3)
        extracted = soup.find(id="nav").extract()
        self.assertEqual(
            soup.decode(), "<html><body>Some content.  More content.</body></html>")
        self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
        # The extracted tag is now an orphan.
        self.assertEqual(len(soup.body.contents), 2)
        self.assertEqual(extracted.parent, None)
        self.assertEqual(extracted.previous_element, None)
        self.assertEqual(extracted.next_element.next_element, None)
        # The gap where the extracted tag used to be has been mended.
        content_1 = soup.find(text="Some content. ")
        content_2 = soup.find(text=" More content.")
        self.assertEqual(content_1.next_element, content_2)
        self.assertEqual(content_1.next_sibling, content_2)
        self.assertEqual(content_2.previous_element, content_1)
        self.assertEqual(content_2.previous_sibling, content_1)
    def test_extract_distinguishes_between_identical_strings(self):
        # extract() removes the exact node it was called on, even when
        # equal-looking strings are present.
        soup = self.soup("<a>foo</a><b>bar</b>")
        foo_1 = soup.a.string
        bar_1 = soup.b.string
        foo_2 = soup.new_string("foo")
        bar_2 = soup.new_string("bar")
        soup.a.append(foo_2)
        soup.b.append(bar_2)
        # Now there are two identical strings in the <a> tag, and two
        # in the <b> tag. Let's remove the first "foo" and the second
        # "bar".
        foo_1.extract()
        bar_2.extract()
        self.assertEqual(foo_2, soup.a.string)
        self.assertEqual(bar_2, soup.b.string)
    def test_extract_multiples_of_same_tag(self):
        soup = self.soup("""
<html>
<head>
<script>foo</script>
</head>
<body>
<script>bar</script>
<a></a>
</body>
<script>baz</script>
</html>""")
        [soup.script.extract() for i in soup.find_all("script")]
        self.assertEqual("<body>\n\n<a></a>\n</body>", str(soup.body))
    def test_extract_works_when_element_is_surrounded_by_identical_strings(self):
        soup = self.soup(
            '<html>\n'
            '<body>hi</body>\n'
            '</html>')
        soup.find('body').extract()
        self.assertEqual(None, soup.find('body'))
    def test_clear(self):
        """Tag.clear()"""
        soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
        # clear using extract()
        a = soup.a
        soup.p.clear()
        self.assertEqual(len(soup.p.contents), 0)
        self.assertTrue(hasattr(a, "contents"))
        # clear using decompose()
        em = a.em
        a.clear(decompose=True)
        self.assertEqual(0, len(em.contents))
    def test_string_set(self):
        """Tag.string = 'string'"""
        soup = self.soup("<a></a> <b><c></c></b>")
        soup.a.string = "foo"
        self.assertEqual(soup.a.contents, ["foo"])
        # Setting .string replaces existing children, even tags.
        soup.b.string = "bar"
        self.assertEqual(soup.b.contents, ["bar"])
    def test_string_set_does_not_affect_original_string(self):
        # Assigning one tag's string to another copies it rather than
        # moving it.
        soup = self.soup("<a><b>foo</b><c>bar</c>")
        soup.b.string = soup.c.string
        self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")
    def test_set_string_preserves_class_of_string(self):
        soup = self.soup("<a></a>")
        cdata = CData("foo")
        soup.a.string = cdata
        self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
    """Test various features of element objects."""
    def test_len(self):
        """The length of an element is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")
        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        self.assertEqual(len(soup.contents), 1)
        self.assertEqual(len(soup), 1)
        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        self.assertEqual(len(soup.top), 3)
        self.assertEqual(len(soup.top.contents), 3)
    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        self.assertEqual(soup.b, soup.find('b'))
        self.assertEqual(soup.b.i, soup.find('b').find('i'))
        # A member with no matching tag is None rather than an error.
        self.assertEqual(soup.a, None)
    def test_deprecated_member_access(self):
        # The old BS2-era .fooTag access still works but warns.
        soup = self.soup('<b><i></i></b>')
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        self.assertEqual(soup.b, tag)
        self.assertEqual(
            '.bTag is deprecated, use .find("b") instead.',
            str(w[0].message))
    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        self.assertTrue(soup.foo.has_attr('attr'))
        self.assertFalse(soup.foo.has_attr('attr2'))
    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
    def test_string(self):
        # A tag that contains only a text node makes that node
        # available as .string.
        soup = self.soup("<b>foo</b>")
        self.assertEqual(soup.b.string, 'foo')
    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)
    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)
        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)
        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)
    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")
    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)
        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)
    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b>   <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar   t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        self.assertEqual(soup.a.get_text(","), "a,r,   , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
    def test_get_text_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(soup.get_text(), "foobar")
        # Passing a types= tuple (or None, meaning all types) brings
        # Comment text back into the result.
        self.assertEqual(
            soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar")
        self.assertEqual(
            soup.get_text(types=None), "fooIGNOREbar")
    def test_all_strings_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(['foo', 'bar'], list(soup.strings))
class TestCDAtaListAttributes(SoupTest):
    """Testing cdata-list attributes like 'class'.

    (The class name misspells "CData", but renaming it would change the
    public interface, so it is left alone.)
    """
    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        # FIX: added the PEP 8 space after the comma, matching the
        # style used everywhere else in this file.
        self.assertEqual(["foo"], soup.a['class'])
    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        self.assertEqual(["foo", "bar"], soup.a['class'])
    def test_multiple_values_separated_by_weird_whitespace(self):
        # Tabs and newlines count as separators, just like spaces.
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        self.assertEqual(["foo", "bar", "baz"], soup.a['class'])
    def test_attributes_joined_into_string_on_output(self):
        # On output, the list is joined with single spaces.
        soup = self.soup("<a class='foo\tbar'>")
        self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
    def test_accept_charset(self):
        # accept-charset is a cdata-list attribute on the <form> tag.
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
    def test_string_has_immutable_name_property(self):
        # NavigableStrings have a .name of None that cannot be set.
        string = self.soup("s").string
        self.assertEqual(None, string.name)
        def t():
            string.name = 'foo'
        self.assertRaises(AttributeError, t)
class TestPersistence(SoupTest):
    "Testing features like pickle and deepcopy."
    def setUp(self):
        super(TestPersistence, self).setUp()
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:[email protected]">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)
    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), self.tree.decode())
    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        self.assertEqual(copied.decode(), self.tree.decode())
    def test_copy_preserves_encoding(self):
        soup = BeautifulSoup(b'<p> </p>', 'html.parser')
        encoding = soup.original_encoding
        # FIX: the local variable used to be named `copy`, shadowing
        # the stdlib `copy` module that the sibling tests in this
        # class rely on.
        soup_copy = soup.__copy__()
        self.assertEqual("<p> </p>", str(soup_copy))
        self.assertEqual(encoding, soup_copy.original_encoding)
    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.decode(), soup.decode())
    def test_copy_navigablestring_is_not_attached_to_tree(self):
        # A shallow copy of a NavigableString is equal to the original
        # but detached from the parse tree.
        html = "<b>Foo<a></a></b><b>Bar</b>"
        soup = self.soup(html)
        s1 = soup.find(string="Foo")
        s2 = copy.copy(s1)
        self.assertEqual(s1, s2)
        self.assertEqual(None, s2.parent)
        self.assertEqual(None, s2.next_element)
        self.assertNotEqual(None, s1.next_sibling)
        self.assertEqual(None, s2.next_sibling)
        self.assertEqual(None, s2.previous_element)
    def test_copy_navigablestring_subclass_has_same_type(self):
        html = "<b><!--Foo--></b>"
        soup = self.soup(html)
        s1 = soup.string
        s2 = copy.copy(s1)
        self.assertEqual(s1, s2)
        self.assertTrue(isinstance(s2, Comment))
    def test_copy_entire_soup(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        soup_copy = copy.copy(soup)
        self.assertEqual(soup, soup_copy)
    def test_copy_tag_copies_contents(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        div = soup.div
        div_copy = copy.copy(div)
        # The two tags look the same, and evaluate to equal.
        self.assertEqual(str(div), str(div_copy))
        self.assertEqual(div, div_copy)
        # But they're not the same object.
        self.assertFalse(div is div_copy)
        # And they don't have the same relation to the parse tree. The
        # copy is not associated with a parse tree at all.
        self.assertEqual(None, div_copy.parent)
        self.assertEqual(None, div_copy.previous_element)
        self.assertEqual(None, div_copy.find(string='Bar').next_element)
        self.assertNotEqual(None, div.find(string='Bar').next_element)
class TestSubstitutions(SoupTest):
def test_default_formatter_is_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_html(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
self.assertEqual(
decoded,
self.document_for("<b><<Sacré bleu!>></b>"))
def test_formatter_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_null(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
self.assertEqual(decoded,
self.document_for("<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_custom(self):
markup = "<b><foo></b><b>bar</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
self.assertEqual(
decoded,
self.document_for("<b><FOO></b><b>BAR</b>"))
def test_formatter_is_run_on_attribute_values(self):
markup = '<a href="http://a.com?a=b&c=é">e</a>'
soup = self.soup(markup)
a = soup.a
expect_minimal = '<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_minimal, a.decode())
self.assertEqual(expect_minimal, a.decode(formatter="minimal"))
expect_html = '<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_html, a.decode(formatter="html"))
self.assertEqual(markup, a.decode(formatter=None))
expect_upper = '<a href="HTTP://A.COM?A=B&C=É">E</a>'
self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))
def test_formatter_skips_script_tag_for_html_documents(self):
doc = """
<script type="text/javascript">
console.log("< < hey > > ");
</script>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_formatter_skips_style_tag_for_html_documents(self):
doc = """
<style type="text/css">
console.log("< < hey > > ");
</style>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_prettify_leaves_preformatted_text_alone(self):
soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ")
# Everything outside the <pre> tag is reformatted, but everything
# inside is left alone.
self.assertEqual(
'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>',
soup.div.prettify())
def test_prettify_accepts_formatter(self):
soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
pretty = soup.prettify(formatter = lambda x: x.upper())
self.assertTrue("FOO" in pretty)
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
self.assertEqual(str, type(soup.prettify()))
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
self.assertEqual(bytes, type(soup.prettify("utf-8")))
def test_html_entity_substitution_off_by_default(self):
markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
self.assertEqual(encoded, markup.encode('utf-8'))
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
soup = self.soup(meta_tag)
# Parse the document, and the charset apprears unchanged.
self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')
# Encode the document into some encoding, and the encoding is
# substituted into the meta tag.
utf_8 = soup.encode("utf-8")
self.assertTrue(b"charset=utf-8" in utf_8)
euc_jp = soup.encode("euc_jp")
self.assertTrue(b"charset=euc_jp" in euc_jp)
shift_jis = soup.encode("shift-jis")
self.assertTrue(b"charset=shift-jis" in shift_jis)
utf_16_u = soup.encode("utf-16").decode("utf-16")
self.assertTrue("charset=utf-16" in utf_16_u)
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.string.encode("utf-8"),
"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")
def test_encoding_can_be_made_strict(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertRaises(
UnicodeEncodeError, soup.encode, "ascii", errors="strict")
def test_decode_contents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual("\N{SNOWMAN}", soup.b.decode_contents())
def test_encode_contents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
encoding="utf8"))
def test_deprecated_renderContents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())
def test_repr(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
if PY3K:
self.assertEqual(html, repr(soup))
else:
self.assertEqual(b'<b>\\u2603</b>', repr(soup))
class TestNavigableStringSubclasses(SoupTest):
def test_cdata(self):
# None of the current builders turn CDATA sections into CData
# objects, but you can create them manually.
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
self.assertEqual(str(soup), "<![CDATA[foo]]>")
self.assertEqual(soup.find(text="foo"), "foo")
self.assertEqual(soup.contents[0], "foo")
def test_cdata_is_never_formatted(self):
"""Text inside a CData object is passed into the formatter.
But the return value is ignored.
"""
self.count = 0
def increment(*args):
self.count += 1
return "BITTER FAILURE"
soup = self.soup("")
cdata = CData("<><><>")
soup.insert(1, cdata)
self.assertEqual(
b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
self.assertEqual(1, self.count)
def test_doctype_ends_in_newline(self):
# Unlike other NavigableString subclasses, a DOCTYPE always ends
# in a newline.
doctype = Doctype("foo")
soup = self.soup("")
soup.insert(1, doctype)
self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
def test_declaration(self):
d = Declaration("foo")
self.assertEqual("<?foo?>", d.output_ready())
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
<custom-dashed-tag class="dashed" id="dash2"/>
<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
<x id="xid">
<z id="zida"/>
<z id="zidab"/>
<z id="zidac"/>
</x>
<y id="yid">
<z id="zidb"/>
</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML, 'html.parser')
def assertSelects(self, selector, expected_ids, **kwargs):
el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, ['The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 4)
for div in els:
self.assertEqual(div.name, 'div')
el = self.soup.select_one('div')
self.assertEqual('main', el['id'])
def test_select_one_returns_none_if_no_match(self):
match = self.soup.select_one('nonexistenttag')
self.assertEqual(None, match)
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner', 'data1'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['data1', 'main', 'inner', 'footer'])
def test_limit(self):
self.assertSelects('html div', ['main'], limit=1)
self.assertSelects('html body div', ['inner', 'main'], limit=2)
self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'],
limit=10)
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertRaises(ValueError, self.soup.select, 'tag%t')
def test_select_dashed_tag_ids(self):
self.assertSelects('custom-dashed-tag', ['dash1', 'dash2'])
def test_select_dashed_by_id(self):
dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
self.assertEqual(dashed[0].name, 'custom-dashed-tag')
self.assertEqual(dashed[0]['id'], 'dash2')
def test_dashed_tag_text(self):
self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, 'Hello there.')
def test_select_dashed_matches_find_all(self):
self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag'))
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse(els[0].has_attr('class'))
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_child_selector_id(self):
self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
('div[data-tag^="dashed"]', ['data1'])
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
('div[id$="1"]', ['data1']),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
('div[id*="1"]', ['data1']),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
('div[data-tag*="edval"]', ['data1'])
)
def test_attribute_exact_or_hypen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
('div[data-tag]', ['data1'])
)
def test_quoted_space_in_selector_name(self):
html = """<div style="display: wrong">nope</div>
<div style="display: right">yes</div>
"""
soup = BeautifulSoup(html, 'html.parser')
[chosen] = soup.select('div[style="display: right"]')
self.assertEqual("yes", chosen.string)
def test_unsupported_pseudoclass(self):
self.assertRaises(
NotImplementedError, self.soup.select, "a:no-such-pseudoclass")
self.assertRaises(
NotImplementedError, self.soup.select, "a:nth-of-type(a)")
def test_nth_of_type(self):
# Try to select first paragraph
els = self.soup.select('div#inner p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, 'Some text')
# Try to select third paragraph
els = self.soup.select('div#inner p:nth-of-type(3)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, 'Another')
# Try to select (non-existent!) fourth paragraph
els = self.soup.select('div#inner p:nth-of-type(4)')
self.assertEqual(len(els), 0)
# Pass in an invalid value.
self.assertRaises(
ValueError, self.soup.select, 'div p:nth-of-type(0)')
def test_nth_of_type_direct_descendant(self):
els = self.soup.select('div#inner > p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, 'Some text')
def test_id_child_selector_nth_of_type(self):
self.assertSelects('#inner > p:nth-of-type(2)', ['p1'])
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner', 'data1'])
def test_overspecified_child_id(self):
self.assertSelects(".fancy #inner", ['inner'])
self.assertSelects(".normal #inner", [])
def test_adjacent_sibling_selector(self):
self.assertSelects('#p1 + h2', ['header2'])
self.assertSelects('#p1 + h2 + p', ['pmulti'])
self.assertSelects('#p1 + #header2 + .class1', ['pmulti'])
self.assertEqual([], self.soup.select('#p1 + p'))
def test_general_sibling_selector(self):
self.assertSelects('#p1 ~ h2', ['header2', 'header3'])
self.assertSelects('#p1 ~ #header2', ['header2'])
self.assertSelects('#p1 ~ h2 + a', ['me'])
self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me'])
self.assertEqual([], self.soup.select('#inner ~ h2'))
def test_dangling_combinator(self):
self.assertRaises(ValueError, self.soup.select, 'h1 >')
def test_sibling_combinator_wont_select_same_tag_twice(self):
self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])
# Test the selector grouping operator (the comma)
def test_multiple_select(self):
self.assertSelects('x, y', ['xid', 'yid'])
def test_multiple_select_with_no_space(self):
self.assertSelects('x,y', ['xid', 'yid'])
def test_multiple_select_with_more_space(self):
self.assertSelects('x, y', ['xid', 'yid'])
def test_multiple_select_duplicated(self):
self.assertSelects('x, x', ['xid'])
def test_multiple_select_sibling(self):
self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])
def test_multiple_select_tag_and_direct_descendant(self):
self.assertSelects('x, y > z', ['xid', 'zidb'])
def test_multiple_select_direct_descendant_and_tags(self):
self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_multiple_select_indirect_descendant(self):
self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_invalid_multiple_select(self):
self.assertRaises(ValueError, self.soup.select, ',x, y')
self.assertRaises(ValueError, self.soup.select, 'x,,y')
def test_multiple_select_attrs(self):
self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])
def test_multiple_select_ids(self):
self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])
def test_multiple_select_nested(self):
self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
| gpl-3.0 |
OpenBfS/dokpool-plone | Plone/src/docpool.example/docpool/example/local/example.py | 1 | 1443 | # -*- coding: utf-8 -*-
from docpool.base.content.documentpool import APPLICATIONS_KEY
from docpool.example.config import EXAMPLE_APP
from Products.CMFCore.utils import getToolByName
from zope.annotation.interfaces import IAnnotations
def dpAdded(self):
"""
@param self:
@return:
"""
annotations = IAnnotations(self)
fresh = EXAMPLE_APP not in annotations[APPLICATIONS_KEY]
if fresh:
annotations[APPLICATIONS_KEY].append(EXAMPLE_APP)
if fresh:
createExampleGroups(self)
# TODO:
def dpRemoved(self):
"""
@param self:
@return:
"""
# TODO:
return
def createExampleGroups(self):
"""
Create Group for example application access
@param self:
@return:
"""
prefix = self.prefix or self.getId()
prefix = str(prefix)
title = self.Title()
gtool = getToolByName(self, 'portal_groups')
# Group for Example application rights
props = {
'allowedDocTypes': [],
'title': 'Example Users (%s)' % title,
'description': 'Users with access to Example functions.',
'dp': self.UID(),
}
gtool.addGroup("%s_ExampleUsers" % prefix, properties=props)
gtool.addPrincipalToGroup(
'%s_dpadmin' %
prefix,
'%s_ExampleUsers' %
prefix)
# Set Example role as a local role for the new group
self.manage_setLocalRoles("%s_ExampleUsers" % prefix, ["ExampleUser"])
| gpl-3.0 |
jessicalucci/NovaOrc | nova/tests/api/openstack/compute/test_consoles.py | 14 | 11541 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid as stdlib_uuid
from lxml import etree
import webob
from nova.api.openstack.compute import consoles
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
def __init__(self):
self.instances_by_id = {}
self.ids_by_uuid = {}
self.max_id = 0
def return_server_by_id(self, context, id):
if id not in self.instances_by_id:
self._add_server(id=id)
return dict(self.instances_by_id[id])
def return_server_by_uuid(self, context, uuid):
if uuid not in self.ids_by_uuid:
self._add_server(uuid=uuid)
return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
def _add_server(self, id=None, uuid=None):
if id is None:
id = self.max_id + 1
if uuid is None:
uuid = str(stdlib_uuid.uuid4())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
if id > self.max_id:
self.max_id = id
def stub_instance(id, user_id='fake', project_id='fake', host=None,
vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0):
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"hostname": "",
"host": host,
"instance_type": {},
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": timeutils.utcnow(),
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": [],
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress}
return instance
class ConsolesControllerTest(test.TestCase):
def setUp(self):
super(ConsolesControllerTest, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.controller.create(req, self.uuid)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool, instance_name='inst-0001')
expected = {'console': {'id': 20,
'port': 'fake_port',
'host': 'fake_hostname',
'password': 'fake_password',
'instance_name': 'inst-0001',
'console_type': 'fake_type'}}
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
pool1 = dict(console_type='fake_type',
public_hostname='fake_hostname')
cons1 = dict(id=10, password='fake_password',
port='fake_port', pool=pool1)
pool2 = dict(console_type='fake_type2',
public_hostname='fake_hostname2')
cons2 = dict(id=11, password='fake_password2',
port='fake_port2', pool=pool2)
return [cons1, cons2]
expected = {'consoles':
[{'console': {'id': 10, 'console_type': 'fake_type'}},
{'console': {'id': 11, 'console_type': 'fake_type2'}}]}
self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool)
def fake_delete_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.controller.delete(req, self.uuid, '20')
def test_delete_console_unknown_console(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
def test_delete_console_unknown_instance(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
class TestConsolesXMLSerializer(test.TestCase):
    """Checks the XML templates used to serialize console API responses."""

    def test_show(self):
        # Serialize a single console dict and verify each field round-trips
        # into the expected child element of the <console> root.
        fixture = {'console': {'id': 20,
                               'password': 'fake_password',
                               'port': 'fake_port',
                               'host': 'fake_hostname',
                               'console_type': 'fake_type'}}

        output = consoles.ConsoleTemplate().serialize(fixture)
        res_tree = etree.XML(output)

        self.assertEqual(res_tree.tag, 'console')
        self.assertEqual(res_tree.xpath('id')[0].text, '20')
        self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
        self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
        self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
        self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')

    def test_index(self):
        # Serialize a list of consoles: the root is <consoles> with one
        # nested <console> wrapper per entry, each holding its own fields.
        fixture = {'consoles': [{'console': {'id': 10,
                                             'console_type': 'fake_type'}},
                                {'console': {'id': 11,
                                             'console_type': 'fake_type2'}}]}

        output = consoles.ConsolesTemplate().serialize(fixture)
        res_tree = etree.XML(output)

        self.assertEqual(res_tree.tag, 'consoles')
        self.assertEqual(len(res_tree), 2)
        self.assertEqual(res_tree[0].tag, 'console')
        self.assertEqual(res_tree[1].tag, 'console')
        self.assertEqual(len(res_tree[0]), 1)
        self.assertEqual(res_tree[0][0].tag, 'console')
        self.assertEqual(len(res_tree[1]), 1)
        self.assertEqual(res_tree[1][0].tag, 'console')
        self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
        self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
        self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
                         'fake_type')
        self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
                         'fake_type2')
| apache-2.0 |
yb-kim/gemV | src/arch/x86/isa/insts/simd128/floating_point/logical/orp.py | 91 | 3181 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode for the SSE ORPS and SSE2 ORPD instructions (bitwise OR of
# packed floating-point values) in register, memory and RIP-relative
# operand forms.  Each 128-bit XMM operation is split into two 64-bit
# 'mor' micro-ops acting on the low (xmml) and high (xmmh) halves.
# NOTE(review): the string below is parsed by the micro-assembler and is
# therefore kept byte-for-byte as found.
microcode = '''
def macroop ORPS_XMM_XMM {
mor xmml, xmml, xmmlm
mor xmmh, xmmh, xmmhm
};
def macroop ORPS_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
def macroop ORPS_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
def macroop ORPD_XMM_XMM {
mor xmml, xmml, xmmlm
mor xmmh, xmmh, xmmhm
};
def macroop ORPD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
def macroop ORPD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
'''
| bsd-3-clause |
ofgulban/scikit-image | doc/examples/segmentation/plot_label.py | 7 | 1493 | """
===================
Label image regions
===================
This example shows how to segment an image with image labelling. The following
steps are applied:
1. Thresholding with automatic Otsu method
2. Close small holes with binary closing
3. Remove artifacts touching image border
4. Measure image regions to filter small objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.measure import regionprops
from skimage.color import label2rgb
# Crop the sample image's border so the coins sit centred in the view.
image = data.coins()[50:-50, 50:-50]

# Binarise with Otsu's automatic threshold, then close small holes.
threshold = threshold_otsu(image)
binary = closing(image > threshold, square(3))

# Remove any blobs touching the image border (clear_border works in place,
# so operate on a copy to keep the closed mask intact).
cleared = binary.copy()
clear_border(cleared)

# Assign an integer label to each connected region and build a colour overlay.
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)

for region in regionprops(label_image):
    # Only outline regions large enough to plausibly be coins.
    if region.area >= 100:
        min_row, min_col, max_row, max_col = region.bbox
        outline = mpatches.Rectangle(
            (min_col, min_row), max_col - min_col, max_row - min_row,
            fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(outline)

plt.show()
| bsd-3-clause |
smart-developerr/my-first-blog | Lib/site-packages/setuptools/extension.py | 299 | 1729 | import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from setuptools.extern.six.moves import map
from .monkey import get_unpatched
def _have_cython():
    """
    Return True when the Cython build backend can be imported.

    Equivalent to ``from Cython.Distutils.build_ext import build_ext``
    succeeding; any failure (missing package, broken installation) is
    treated as "no Cython".
    """
    try:
        module = __import__('Cython.Distutils.build_ext',
                            fromlist=['build_ext'])
        module.build_ext  # force attribute resolution, mirroring the import
    except Exception:
        return False
    return True
# for compatibility
# Historical alias: setuptools checked for Pyrex before Cython superseded it.
have_pyrex = _have_cython

# The original (un-monkeypatched) distutils Extension class to inherit from.
_Extension = get_unpatched(distutils.core.Extension)
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def __init__(self, name, sources, *args, **kw):
        # The *args is needed for compatibility as calls may use positional
        # arguments. py_limited_api may be set only via keyword.
        self.py_limited_api = kw.pop("py_limited_api", False)
        _Extension.__init__(self, name, sources, *args, **kw)

    def _convert_pyx_sources_to_lang(self):
        """
        Replace sources with .pyx extensions to sources with the target
        language extension. This mechanism allows language authors to supply
        pre-converted sources but to prefer the .pyx sources.
        """
        if _have_cython():
            # the build has Cython, so allow it to compile the .pyx files
            return
        lang = self.language or ''
        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        # Escape the dot: the bare '.pyx$' pattern treated it as a wildcard,
        # so names such as 'foo_pyx' were also (wrongly) rewritten.
        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
        self.sources = list(map(sub, self.sources))
# Marker subclass: build commands treat Library instances as shared/static
# libraries to link against rather than importable extension modules.
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
| gpl-3.0 |
Celthi/youtube-dl-GUI | youtube_dl/extractor/rutube.py | 42 | 5766 | # encoding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
unified_strdate,
)
class RutubeIE(InfoExtractor):
    """Extracts a single Rutube video via the site's public JSON API."""
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    # Video pages are addressed by a 32-character lowercase hex id.
    _VALID_URL = r'https?://rutube\.ru/video/(?P<id>[\da-z]{32})'

    _TEST = {
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 80,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'upload_date': '20131016',
        },
        'params': {
            # It requires ffmpeg (m3u8 download)
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        """Fetch video metadata and the m3u8 playback manifest, then
        assemble the info dict youtube-dl expects."""
        video_id = self._match_id(url)
        # First call: general metadata (title, duration, author, ...).
        video = self._download_json(
            'http://rutube.ru/api/video/%s/?format=json' % video_id,
            video_id, 'Downloading video JSON')

        # Some videos don't have the author field
        author = video.get('author') or {}

        # Second call: playback options carrying the HLS manifest URL.
        options = self._download_json(
            'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
            video_id, 'Downloading options JSON')

        m3u8_url = options['video_balancer'].get('m3u8')
        if m3u8_url is None:
            raise ExtractorError('Couldn\'t find m3u8 manifest url')
        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')

        return {
            'id': video['id'],
            'title': video['title'],
            'description': video['description'],
            'duration': video['duration'],
            'view_count': video['hits'],
            'formats': formats,
            'thumbnail': video['thumbnail_url'],
            'uploader': author.get('name'),
            # uploader_id only makes sense when an author block was present.
            'uploader_id': compat_str(author['id']) if author else None,
            'upload_date': unified_strdate(video['created_ts']),
            'age_limit': 18 if video['is_adult'] else 0,
        }
class RutubeEmbedIE(InfoExtractor):
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    _VALID_URL = 'https?://rutube\.ru/video/embed/(?P<id>[0-9]+)'

    _TEST = {
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
        },
        'params': {
            'skip_download': 'Requires ffmpeg',
        },
    }

    def _real_extract(self, url):
        # The embed page carries a <link rel="canonical"> pointing at the
        # regular watch URL; hand that off to the main Rutube extractor.
        embed_id = self._match_id(url)
        page = self._download_webpage(url, embed_id)
        canonical = self._html_search_regex(
            r'<link\s+rel="canonical"\s+href="([^"]+?)"', page,
            'Canonical URL')
        return self.url_result(canonical, 'Rutube')
class RutubeChannelIE(InfoExtractor):
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channels'
    _VALID_URL = r'http://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'

    def _extract_videos(self, channel_id, channel_title=None):
        # Page through the JSON API, turning each video into a url_result
        # entry, until a page comes back empty or reports no next page.
        entries = []
        pagenum = 1
        while True:
            page = self._download_json(
                self._PAGE_TEMPLATE % (channel_id, pagenum),
                channel_id, 'Downloading page %s' % pagenum)
            results = page['results']
            if not results:
                break
            for result in results:
                entries.append(self.url_result(result['video_url'], 'Rutube'))
            if not page['has_next']:
                break
            pagenum += 1
        return self.playlist_result(entries, channel_id, channel_title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        return self._extract_videos(mobj.group('id'))
class RutubeMovieIE(RutubeChannelIE):
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'http://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _TESTS = []

    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        # Resolve the movie's name first so the playlist gets a title, then
        # reuse the channel extractor's pagination machinery.
        movie_id = self._match_id(url)
        movie_info = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        return self._extract_videos(movie_id, movie_info['name'])
# A user's personal video page: same pagination as a channel, different
# API endpoint (only _PAGE_TEMPLATE changes).
class RutubePersonIE(RutubeChannelIE):
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'http://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
| mit |
kg-bot/SupyBot | plugins/Trivial/test.py | 1 | 1739 | ###
# Copyright (c) 2007, Benjamin N. Rubin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class TrivialTestCase(PluginTestCase):
    """Placeholder test case: loads the Trivial plugin into the Supybot
    test harness without exercising any commands yet."""
    # Only the Trivial plugin is loaded for this test case.
    plugins = ('Trivial',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| gpl-3.0 |
jesseditson/rethinkdb | external/v8_3.30.33.16/testing/gmock/scripts/gmock_doctor.py | 163 | 23590 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = '[email protected] (Zhanyong Wan)'
import re
import sys
# Version string reported by main().
_VERSION = '1.0.3'

# Contact address included in the user-facing messages below.
_EMAIL = '[email protected]'

# Google Mock public API names that live in the 'testing' namespace.  Used
# to tell a missing 'using testing::X;' apart from a genuinely undeclared
# identifier (see the NUS and TTB diagnosers).
_COMMON_GMOCK_SYMBOLS = [
    # Matchers
    '_',
    'A',
    'AddressSatisfies',
    'AllOf',
    'An',
    'AnyOf',
    'ContainerEq',
    'Contains',
    'ContainsRegex',
    'DoubleEq',
    'ElementsAre',
    'ElementsAreArray',
    'EndsWith',
    'Eq',
    'Field',
    'FloatEq',
    'Ge',
    'Gt',
    'HasSubstr',
    'IsInitializedProto',
    'Le',
    'Lt',
    'MatcherCast',
    'Matches',
    'MatchesRegex',
    'NanSensitiveDoubleEq',
    'NanSensitiveFloatEq',
    'Ne',
    'Not',
    'NotNull',
    'Pointee',
    'Property',
    'Ref',
    'ResultOf',
    'SafeMatcherCast',
    'StartsWith',
    'StrCaseEq',
    'StrCaseNe',
    'StrEq',
    'StrNe',
    'Truly',
    'TypedEq',
    'Value',

    # Actions
    'Assign',
    'ByRef',
    'DeleteArg',
    'DoAll',
    'DoDefault',
    'IgnoreResult',
    'Invoke',
    'InvokeArgument',
    'InvokeWithoutArgs',
    'Return',
    'ReturnNew',
    'ReturnNull',
    'ReturnRef',
    'SaveArg',
    'SetArgReferee',
    'SetArgPointee',
    'SetArgumentPointee',
    'SetArrayArgument',
    'SetErrnoAndReturn',
    'Throw',
    'WithArg',
    'WithArgs',
    'WithoutArgs',

    # Cardinalities
    'AnyNumber',
    'AtLeast',
    'AtMost',
    'Between',
    'Exactly',

    # Sequences
    'InSequence',
    'Sequence',

    # Misc
    'DefaultValue',
    'Mock',
    ]

# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
# Clang additionally reports the column number.
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
# Like the Clang pattern, but only matches locations whose final path
# component does NOT start with 'gmock-' (i.e. user code, not gmock headers).
_CLANG_NON_GMOCK_FILE_LINE_RE = (
    r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
  """Returns an iterator over all non-overlapping matches of regex in s."""
  # re.finditer compiles (and caches) the pattern itself, so there is no
  # need for an explicit re.compile step.
  return re.finditer(regex, s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
  """Diagnoses the given disease by pattern matching.

  Can provide different diagnoses for different patterns.

  Args:
    short_name: Short name of the disease.
    long_name:  Long name of the disease.
    diagnoses:  A list of pairs (regex, pattern for formatting the diagnosis
                for matching regex).
    msg:        Compiler's error messages.
  Yields:
    Tuples of the form
      (short name of disease, long name of disease, diagnosis).
  """
  # Every diagnosis is prefixed with the file:line location captured by the
  # regex's named groups.
  location_prefix = '%(file)s:%(line)s:'
  for regex, template in diagnoses:
    if not re.search(regex, msg):
      continue
    full_template = location_prefix + template
    for match in _FindAllMatches(regex, msg):
      yield (short_name, long_name, full_template % match.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
  """Diagnoses the NRR disease, given the error messages by the compiler."""
  # GCC's wording of the error Return() triggers with a reference return
  # type (gmock's static assert shows up as a negative-size array).
  gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
               + _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: creating array with negative size')
  # Clang's wording; captures the concrete return type as 'type'.
  clang_regex = (r'error:.*array.*negative.*\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of function template specialization '
                 r'\'testing::internal::ReturnAction<(?P<type>.*)>'
                 r'::operator Action<.*>\' requested here')
  diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
  # GCC cannot extract the type, so a generic placeholder is substituted.
  return _GenericDiagnoser('NRR', 'Need to Return Reference',
                           [(clang_regex, diagnosis),
                            (gcc_regex, diagnosis % {'type': 'a type'})],
                           msg)
def _NeedToReturnSomethingDiagnoser(msg):
  """Diagnoses the NRS disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
               r'*gmock.*actions\.h.*error: void value not ignored)'
               r'|(error: control reaches end of non-void function)')
  # Clang has two variants of this error: one spelling the return type via
  # the 'Result' alias, one spelling it directly.
  clang_regex1 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
                  r'with an rvalue of type \'void\'')
  clang_regex2 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'(?P<return_type>.*)\' '
                  r'with an rvalue of type \'void\'')
  diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
  # The GCC message does not reveal the type, hence the placeholder.
  return _GenericDiagnoser(
      'NRS',
      'Need to Return Something',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _NeedToReturnNothingDiagnoser(msg):
  """Diagnoses the NRN disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: instantiation of '
               r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
               r'as type \'void\'')
  # Clang variant 1: a plain Return(value) used where void is expected.
  clang_regex1 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
                  r'::operator Action<void \(.*\)>\' requested here')
  # Clang variant 2: the offending Return() is inside a DoAll() chain.
  clang_regex2 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::DoBothAction<.*>'
                  r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
                  r'requested here')
  diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
  return _GenericDiagnoser(
      'NRN',
      'Need to Return Nothing',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
  """Diagnoses the IBRA disease, given the error messages by the compiler."""
  # Both patterns key off gtest's universal printer applying sizeof to a
  # type that was only forward-declared; the type name is captured.
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gtest-printers\.h.*error: invalid application of '
               r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
  clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
                 r'\'sizeof\' to an incomplete type '
                 r'\'(?P<type>.*)( const)?\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of member function '
                 r'\'testing::internal2::TypeWithoutFormatter<.*>::'
                 r'PrintValue\' requested here')
  diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
  return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
  """Diagnoses the OFM disease, given the error messages by the compiler."""
  # Triggered when an overloaded function name is passed to Truly() and the
  # compiler cannot pick an overload.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Truly\(<unresolved overloaded function type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
                 r'call to \'Truly')
  diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
  return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionActionDiagnoser(msg):
  """Diagnoses the OFA disease, given the error messages by the compiler."""
  # Triggered when an overloaded free function is passed to Invoke() and
  # template argument deduction for FunctionImpl fails.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
               r'\'Invoke\(<unresolved overloaded function type>')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
                 r'function for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-\w+-actions\.h:\d+:\d+:\s+'
                 r'note: candidate template ignored:\s+'
                 r'couldn\'t infer template argument \'FunctionImpl\'')
  diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
  return _GenericDiagnoser('OFA', 'Overloaded Function Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedMethodActionDiagnoser(msg):
  """Diagnoses the OMA disease, given the error messages by the compiler."""
  # Like OFA, but for the two-argument Invoke(object, &Class::Method) form
  # where the member function pointer is ambiguous.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Invoke\(.+, <unresolved overloaded function '
               r'type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
                 r'for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-\w+-actions\.h:\d+:\d+: '
                 r'note: candidate function template not viable: '
                 r'requires .*, but 2 (arguments )?were provided')
  diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _MockObjectPointerDiagnoser(msg):
  """Diagnoses the MOP disease, given the error messages by the compiler."""
  # GCC reveals the mocked method, the offending expression and the class
  # name; all three are substituted into the diagnosis.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
               r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
               r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
  # Clang only reveals the class name, so generic placeholders are used for
  # the other two fields below.
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
                 r'\'(?P<class_name>.*?) *\' is a pointer; '
                 r'maybe you meant to use \'->\'\?')
  diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
  return _GenericDiagnoser(
      'MOP',
      'Mock Object Pointer',
      [(gcc_regex, diagnosis),
       (clang_regex, diagnosis % {'mock_object': 'mock_object',
                                  'method': 'method',
                                  'class_name': '%(class_name)s'})],
      msg)
def _NeedToUseSymbolDiagnoser(msg):
  """Diagnoses the NUS disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
               r'(was not declared in this scope|has not been declared)')
  clang_regex = (_CLANG_FILE_LINE_RE +
                 r'error: (use of undeclared identifier|unknown type name|'
                 r'no template named) \'(?P<symbol>[^\']+)\'')
  diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
  # Only report identifiers that are actually Google Mock API names; any
  # other undeclared identifier is a genuine user error, not a missing
  # 'using' declaration.
  for regex in (gcc_regex, clang_regex):
    for match in _FindAllMatches(regex, msg):
      fields = match.groupdict()
      if fields['symbol'] in _COMMON_GMOCK_SYMBOLS:
        yield ('NUS', 'Need to Use Symbol', diagnosis % fields)
def _NeedToUseReturnNullDiagnoser(msg):
  """Diagnoses the NRNULL disease, given the error messages by the compiler."""
  # Return(NULL) expands to Return(0) / Return(0L), which gmock cannot
  # implicitly cast to a pointer type; both compilers fail in ImplicitCast_.
  gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
               '::operator testing::Action<Func>\(\) const.*\n' +
               _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*error: no matching function for call to \'ImplicitCast_\('
               r'(:?long )?int&\)')
  clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
                 r'call to \'ImplicitCast_\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                 r'of function template specialization '
                 r'\'testing::internal::ReturnAction<(int|long)>::operator '
                 r'Action<(?P<type>.*)\(\)>\' requested here')
  diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
  # Clang's note reveals the target type; GCC's does not.
  return _GenericDiagnoser(
      'NRNULL', 'Need to use ReturnNull',
      [(clang_regex, diagnosis),
       (gcc_regex, diagnosis % {'type': 'the right type'})],
      msg)
def _TypeInTemplatedBaseDiagnoser(msg):
  """Diagnoses the TTB disease, given the error messages by the compiler."""
  # This version works when the type is used as the mock function's return
  # type.
  gcc_4_3_1_regex_type_in_retval = (
      r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
      r'error: a function call cannot appear in a constant-expression')
  gcc_4_4_0_regex_type_in_retval = (
      r'error: a function call cannot appear in a constant-expression'
      + _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
  # This version works when the type is used as the mock function's sole
  # parameter type.
  gcc_regex_type_of_sole_param = (
      _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n')
  # This version works when the type is used as a parameter of a mock
  # function that has multiple parameters.
  gcc_regex_type_of_a_param = (
      r'error: expected `;\' before \'::\' token\n'
      + _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n'
      r'.*error: \'.+\' was not declared in this scope')
  clang_regex_type_of_retval_or_sole_param = (
      _CLANG_FILE_LINE_RE +
      r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):\d+: error: '
      r'non-friend class member \'Result\' cannot have a qualified name'
      )
  clang_regex_type_of_a_param = (
      _CLANG_FILE_LINE_RE +
      r'error: C\+\+ requires a type specifier for all declarations\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):(?P=column): error: '
      r'C\+\+ requires a type specifier for all declarations'
      )
  clang_regex_unknown_type = (
      _CLANG_FILE_LINE_RE +
      r'error: unknown type name \'(?P<type>[^\']+)\''
      )
  diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
  # Some patterns cannot capture the type name; those substitute 'Foo'.
  for diag in _GenericDiagnoser(
      'TTB', 'Type in Template Base',
      [(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_regex_type_of_sole_param, diagnosis),
       (gcc_regex_type_of_a_param, diagnosis),
       (clang_regex_type_of_retval_or_sole_param, diagnosis),
       (clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
      msg):
    yield diag
  # Avoid overlap with the NUS pattern.
  for m in _FindAllMatches(clang_regex_unknown_type, msg):
    type_ = m.groupdict()['type']
    if type_ not in _COMMON_GMOCK_SYMBOLS:
      yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
  """Diagnoses the WMM disease, given the error messages by the compiler."""
  # Captures both the argument count the user wrote (wrong_args) and the
  # count the mocked function actually has (args).
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
               r'.*\n'
               r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error:.*array.*negative.*r?\n'
                 r'(.*\n)*?'
                 r'(?P=file):(?P=line):(?P=column): error: too few arguments '
                 r'to function call, expected (?P<args>\d+), '
                 r'have (?P<wrong_args>\d+)')
  diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
  return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _WrongParenPositionDiagnoser(msg):
  """Diagnoses the WPP disease, given the error messages by the compiler."""
  # A clause such as .WillOnce(...) written inside the macro's argument list
  # becomes a (nonexistent) member of MockSpec; the member name is captured.
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'error:.*testing::internal::MockSpec<.* has no member named \''
               r'(?P<method>\w+)\'')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error: no member named \'(?P<method>\w+)\' in '
                 r'\'testing::internal::MockSpec<.*>\'')
  diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
  return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
# All known diagnosers.  Diagnose() runs each of them against the compiler
# output in this order and concatenates their findings.
_DIAGNOSERS = [
    _IncompleteByReferenceArgumentDiagnoser,
    _MockObjectPointerDiagnoser,
    _NeedToReturnNothingDiagnoser,
    _NeedToReturnReferenceDiagnoser,
    _NeedToReturnSomethingDiagnoser,
    _NeedToUseReturnNullDiagnoser,
    _NeedToUseSymbolDiagnoser,
    _OverloadedFunctionActionDiagnoser,
    _OverloadedFunctionMatcherDiagnoser,
    _OverloadedMethodActionDiagnoser,
    _TypeInTemplatedBaseDiagnoser,
    _WrongMockMethodMacroDiagnoser,
    _WrongParenPositionDiagnoser,
    ]
def Diagnose(msg):
  """Generates all possible diagnoses given the compiler error message.

  Args:
    msg: compiler output (possibly colorized, possibly UTF-8 quoted).

  Returns:
    A list of unique diagnosis strings, in first-seen order.
  """
  # Strip ANSI color escape sequences so the regexes see plain text.
  msg = re.sub(r'\x1b\[[^m]*m', '', msg)
  # Assuming the string is using the UTF-8 encoding, replaces the left and
  # the right single quote characters with apostrophes.
  msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)
  diagnoses = []
  for diagnoser in _DIAGNOSERS:
    for diag in diagnoser(msg):
      diagnosis = '[%s - %s]\n%s' % diag
      # `not in` (instead of `not x in y`) keeps first-seen order while
      # dropping duplicate diagnoses.
      if diagnosis not in diagnoses:
        diagnoses.append(diagnosis)
  return diagnoses
def main():
  """Reads compiler errors from stdin and prints all diagnoses to stdout."""
  print ('Google Mock Doctor v%s - '
         'diagnoses problems in code using Google Mock.' % _VERSION)

  if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press c-D when '
           'you are done:')
  else:
    # Input is being piped in; just announce that we are waiting.
    print 'Waiting for compiler errors on stdin . . .'

  msg = sys.stdin.read().strip()
  diagnoses = Diagnose(msg)
  count = len(diagnoses)
  if not count:
    # No diagnoser matched: echo the errors back and ask for a bug report.
    print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
  else:
    print '------------------------------------------------------------'
    print 'Your code appears to have the following',
    if count > 1:
      print '%s diseases:' % (count,)
    else:
      print 'disease:'
    i = 0
    for d in diagnoses:
      i += 1
      if count > 1:
        # Number each diagnosis when more than one was found.
        print '\n#%s:' % (i,)
      print d
    print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
           _EMAIL)
| agpl-3.0 |
lmprice/ansible | lib/ansible/modules/cloud/cloudstack/cs_instancegroup.py | 37 | 5335 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instancegroup
short_description: Manages instance groups on Apache CloudStack based clouds.
description:
- Create and remove instance groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the instance group.
required: true
domain:
description:
- Domain the instance group is related to.
account:
description:
- Account the instance group is related to.
project:
description:
- Project the instance group is related to.
state:
description:
- State of the instance group.
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an instance group
- local_action:
module: cs_instancegroup
name: loadbalancers
# Remove an instance group
- local_action:
module: cs_instancegroup
name: loadbalancers
state: absent
'''
RETURN = '''
---
id:
description: UUID of the instance group.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance group.
returned: success
type: string
sample: webservers
created:
description: Date when the instance group was created.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
domain:
description: Domain the instance group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the instance group is related to.
returned: success
type: string
sample: example account
project:
description: Project the instance group is related to.
returned: success
type: string
sample: example project
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackInstanceGroup(AnsibleCloudStack):
    """Ensures presence/absence of a CloudStack instance group."""

    def __init__(self, module):
        super(AnsibleCloudStackInstanceGroup, self).__init__(module)
        # Lazily-populated cache of the API's view of the group.
        self.instance_group = None

    def get_instance_group(self):
        """Look the group up by name or UUID; result is cached on self."""
        if not self.instance_group:
            wanted = self.module.params.get('name')
            query_args = {
                'account': self.get_account('name'),
                'domainid': self.get_domain('id'),
                'projectid': self.get_project('id'),
                'fetch_list': True,
            }
            found = self.query_api('listInstanceGroups', **query_args) or []
            matches = [g for g in found if wanted in (g['name'], g['id'])]
            if matches:
                self.instance_group = matches[0]
        return self.instance_group

    def present_instance_group(self):
        """Create the group if it does not exist yet (check-mode aware)."""
        instance_group = self.get_instance_group()
        if instance_group:
            return instance_group
        self.result['changed'] = True
        if not self.module.check_mode:
            create_args = {
                'name': self.module.params.get('name'),
                'account': self.get_account('name'),
                'domainid': self.get_domain('id'),
                'projectid': self.get_project('id'),
            }
            api_result = self.query_api('createInstanceGroup', **create_args)
            instance_group = api_result['instancegroup']
        return instance_group

    def absent_instance_group(self):
        """Delete the group if it exists (check-mode aware)."""
        instance_group = self.get_instance_group()
        if not instance_group:
            return instance_group
        self.result['changed'] = True
        if not self.module.check_mode:
            self.query_api('deleteInstanceGroup', id=instance_group['id'])
        return instance_group
def main():
    """Ansible entry point: drive the instance group to the desired state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        domain=dict(),
        account=dict(),
        project=dict(),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True,
    )

    handler = AnsibleCloudStackInstanceGroup(module)
    if module.params.get('state') == 'absent':
        instance_group = handler.absent_instance_group()
    else:
        instance_group = handler.present_instance_group()

    module.exit_json(**handler.get_result(instance_group))
| gpl-3.0 |
isajhon/tuconsejocomunal | usuarios_venezuela/modelos/usuarios_venezuela.py | 4 | 3179 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Generated by the OpenERP plugin for Dia !
from openerp.osv import fields, osv
class usuarios_venezuela(osv.osv):
    """Extends res.users with Venezuelan address and contact fields."""
    _name = 'res.users'
    _inherit = 'res.users'

    def limpiar_campos(self, cr, uid, ids, nombre):
        # on_change handler: when a parent location (estado/municipio) is
        # changed, blank out the dependent child fields so stale values
        # are not kept in the form.
        res = {}
        if nombre == 'estado':
            res = {
                'municipio_id': '',
                'parroquia_id': '',
                #~ 'sector_id':'',
            }
        if nombre == 'municipio':
            res = {
                'parroquia_id': '',
                #~ 'sector_id':'',
            }
        return {
            'value': res
        }

    # Extra columns (old OpenERP osv API); help texts are user-facing
    # Spanish strings and must stay as-is.
    _columns = {
        # National identity card number.
        'cedula': fields.char('Cedula de Identidad',
                              size=15,
                              required=True,
                              help='Cedula de Identidad del Usuario'
                              ),
        'telefono': fields.char('Telefono',
                                size=20,
                                help='Teléfono de Contacto del Usuario'
                                ),
        # Location hierarchy: estado -> municipio -> parroquia.
        'estado_id': fields.many2one('res.estados',
                                     'Estado',
                                     required=True,
                                     help='Estado donde vive el usuario'),
        'municipio_id': fields.many2one('res.municipios',
                                        'Municipio',
                                        required=True,
                                        help='Municipio donde vive el usuario'),
        'parroquia_id': fields.many2one('res.parroquias',
                                        'Parroquia',
                                        required=True,
                                        help='Parroquia donde vive el usuario'),
        # Free-form street address components.
        'sector': fields.char('Sector',
                              size=80,
                              help='Sector donde vive el usuario'
                              ),
        'calle_av': fields.char('Calle/Avenida',
                                size=80,
                                help='Calle/Avenida donde vive el usuario'
                                ),
        'casa_edif': fields.char('Casa/Edificio',
                                 size=80,
                                 help='Casa/Edificio donde vive el usuario'
                                 ),
        'piso_apart': fields.char('Piso y Apartemento',
                                  size=20,
                                  help='Piso y Apartemento donde vive el usuario'
                                  ),
    }
| gpl-3.0 |
hayderimran7/zulip | bots/summarize_stream.py | 115 | 2559 | # This is hacky code to analyze data on our support stream. The main
# reusable bits are get_recent_messages and get_words.
import zulip
import re
import collections
def get_recent_messages(client, narrow, count=100):
    """Fetch up to `count` recent messages matching a 'field:value ...' narrow.

    Returns an empty list when the API response carries no messages.
    """
    request = {
        'narrow': [term.split(':') for term in narrow.split()],
        'num_before': count,
        'num_after': 0,
        'anchor': 1000000000,
        'apply_markdown': False,
    }
    response = client.do_api_query(request, zulip.API_VERSTRING + 'messages',
                                   method='GET')
    return response.get('messages', [])
def get_words(content):
    """Tokenize `content` into lowercased words.

    The pattern splits CamelCase identifiers, keeps all-caps runs (acronyms)
    together, and otherwise grabs runs of word characters plus ' and -.
    """
    # Raw string avoids invalid-escape warnings for \w and \- in the pattern.
    regex = r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+"
    words = re.findall(regex, content, re.M)
    return [w.lower() for w in words]


def analyze_messages(msgs, word_count, email_count):
    """Accumulate word and email frequencies from support-ticket messages.

    Only messages matching the ticket format (a Z-prefixed ticket id, an
    email line, and a '~~~'-delimited request body) are counted.  Both
    counters are mutated in place; dead Python-2 debug branches that made
    this module a syntax error under Python 3 have been removed.
    """
    for msg in msgs:
        m = re.search(r'ticket (Z....).*email: (\S+).*~~~(.*)',
                      msg['content'], re.M | re.S)
        if m:
            email, req = m.group(2), m.group(3)
            for word in get_words(req):
                word_count[word] += 1
            email_count[email] += 1
def generate_support_stats():
    """Pulls recent #support messages and prints frequency statistics."""
    client = zulip.Client()
    narrow = 'stream:support'
    count = 2000
    msgs = get_recent_messages(client, narrow, count)
    # Bucket messages by topic (Zulip calls the topic 'subject').
    msgs_by_topic = collections.defaultdict(list)
    for msg in msgs:
        topic = msg['subject']
        msgs_by_topic[topic].append(msg)
    word_count = collections.defaultdict(int)
    email_count = collections.defaultdict(int)
    # Hacky debug toggles below: flip the if False/True guards to choose
    # which report gets printed.
    if False:
        for topic in msgs_by_topic:
            msgs = msgs_by_topic[topic]
            analyze_messages(msgs, word_count, email_count)
    if True:
        # Frequent words: seen >= 10 times and at least 5 chars long.
        words = word_count.keys()
        words = filter(lambda w: word_count[w] >= 10, words)
        words = filter(lambda w: len(w) >= 5, words)
        words = sorted(words, key=lambda w: word_count[w], reverse=True)
        for word in words:
            print word, word_count[word]
    if False:
        emails = email_count.keys()
        emails = sorted(emails, key=lambda w: email_count[w], reverse=True)
        for email in emails:
            print email, email_count[email]

generate_support_stats()
| apache-2.0 |
tashaband/RYU295 | ryu/tests/unit/lib/test_stringify.py | 23 | 2008 | #!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import unittest
from nose.tools import eq_
from ryu.lib import stringify
class C1(stringify.StringifyMixin):
    # Fixture class: 'a' and 'c' take part in the JSON round-trip while the
    # underscore-prefixed '_b' should be ignored by StringifyMixin.
    def __init__(self, a, c):
        print "init", a, c
        self.a = a
        self._b = 'B'
        self.c = c
class Test_stringify(unittest.TestCase):
    """ Test case for ryu.lib.stringify
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_jsondict(self):
        # Default codec: string attributes are base64-encoded in the JSON
        # dict and decoded back on from_jsondict().
        j = {'C1': {'a': 'QUFB', 'c': 'Q0ND'}}
        eq_(j['C1']['a'], base64.b64encode('AAA'))
        eq_(j['C1']['c'], base64.b64encode('CCC'))
        c = C1(a='AAA', c='CCC')
        c2 = C1.from_jsondict(j['C1'])
        eq_(c.__class__, c2.__class__)
        eq_(c.__dict__, c2.__dict__)
        eq_(j, c.to_jsondict())

    def test_jsondict2(self):
        # Custom codec: lower-casing encoder with an upper-casing decoder
        # must round-trip through to_jsondict()/from_jsondict().
        import string

        def my_encode(x):
            return string.lower(x)

        def my_decode(x):
            return string.upper(x)

        j = {'C1': {'a': 'aaa', 'c': 'ccc'}}
        eq_(j['C1']['a'], my_encode('AAA'))
        eq_(j['C1']['c'], my_encode('CCC'))
        c = C1(a='AAA', c='CCC')
        c2 = C1.from_jsondict(j['C1'], decode_string=my_decode)
        eq_(c.__class__, c2.__class__)
        eq_(c.__dict__, c2.__dict__)
        eq_(j, c.to_jsondict(encode_string=my_encode))
| apache-2.0 |
kivio/PerfKitBenchmarker | perfkitbenchmarker/benchmarks/ping_benchmark.py | 2 | 2517 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs ping.
This benchmark runs ping using the internal ips of vms in the same zone.
"""
import logging
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
import re
BENCHMARK_NAME = 'ping'
BENCHMARK_CONFIG = """
ping:
description: Benchmarks ping latency over internal IP addresses
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 2
"""
METRICS = ('Min Latency', 'Average Latency', 'Max Latency', 'Latency Std Dev')
def GetConfig(user_config):
    """Merges user overrides into this benchmark's default config."""
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):  # pylint: disable=unused-argument
    """Install ping on the target vm.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
          required to run the benchmark.
    """
    # Nothing to do: ping ships with the base VM images.
    pass
def Run(benchmark_spec):
    """Run ping on the target vm.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
          required to run the benchmark.

    Returns:
      A list of sample.Sample objects.
    """
    source, target = benchmark_spec.vms[0], benchmark_spec.vms[1]
    if not source.IsReachable(target):
        logging.warn('%s is not reachable from %s', target, source)
        return []
    logging.info('Ping results:')
    stdout, _ = source.RemoteCommand('ping -c 100 %s' % target.internal_ip,
                                     should_log=True)
    # ping's last line looks like: rtt min/avg/max/mdev = a/b/c/d ms
    stats = re.findall(r'([0-9]*\.[0-9]*)', stdout.splitlines()[-1])
    assert len(stats) == len(METRICS), stats
    metadata = {'ip_type': 'internal'}
    return [sample.Sample(metric, float(value), 'ms', metadata)
            for metric, value in zip(METRICS, stats)]
def Cleanup(benchmark_spec):  # pylint: disable=unused-argument
    """Cleanup ping on the target vm (by uninstalling).

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
          required to run the benchmark.
    """
    # Nothing was installed in Prepare, so nothing needs removing.
    pass
| apache-2.0 |
imruahmed/microblog | flask/lib/python2.7/site-packages/pbr/tests/test_util.py | 22 | 2386 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. (HP)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import textwrap
import six
from six.moves import configparser
from pbr.tests import base
from pbr import util
class TestExtrasRequireParsingScenarios(base.BaseTestCase):

    # testscenarios-style data: each scenario pairs a raw setup.cfg snippet
    # with the extras_require mapping pbr should derive from it.  The ini
    # snippets are dedented before parsing, so the option values must stay
    # indented deeper than their option names for configparser continuation.
    scenarios = [
        ('simple_extras', {
            'config_text': """
                [extras]
                first =
                    foo
                    bar==1.0
                second =
                    baz>=3.2
                    foo
                """,
            'expected_extra_requires': {'first': ['foo', 'bar==1.0'],
                                        'second': ['baz>=3.2', 'foo']}
        }),
        ('with_markers', {
            'config_text': """
                [extras]
                test =
                    foo:python_version=='2.6'
                    bar
                    baz<1.6 :python_version=='2.6'
                """,
            'expected_extra_requires': {
                "test:(python_version=='2.6')": ['foo', 'baz<1.6'],
                "test": ['bar']}}),
        ('no_extras', {
            'config_text': """
                [metadata]
                long_description = foo
                """,
            'expected_extra_requires':
            {}
        })]

    def config_from_ini(self, ini):
        # Parse a dedented ini snippet into {section: {option: value}}.
        config = {}
        parser = configparser.SafeConfigParser()
        ini = textwrap.dedent(six.u(ini))
        parser.readfp(io.StringIO(ini))
        for section in parser.sections():
            config[section] = dict(parser.items(section))
        return config

    def test_extras_parsing(self):
        config = self.config_from_ini(self.config_text)
        kwargs = util.setup_cfg_to_setup_kwargs(config)
        self.assertEqual(self.expected_extra_requires,
                         kwargs['extras_require'])
| bsd-3-clause |
bitshares/bitshares | snapshots/build_sharedrop.py | 10 | 12110 | # BTS merger allocation draft 3
# ags-pts-nov-5.json
# 662e2bd091a401108fd9736dd556a2c8744111e1 -
# vote-aug-21-noFMV.json
# c9a08d1a6a1921b1caf2c793cc77f62a2056db1f -
# fmv-key.json
# b5b55c27b50f6c7fe9737e16b0c438dcd0da7ec4 -
# dns-dev-fund.json
# 0847a39bd69e1cc7dc073317184bb789bf8d89f2 -
# dns-collapse.json
# 2e2aa23930ec8e2cbca4e9cf7ea14daa620fb1a1 -
import json
# Collect (address, balance) pairs
exodus_balances = []

# https://github.com/BitShares/bitshares/issues/880
# Owner-requested address substitutions (signatures proving control of the
# old keys are reproduced in the comments at the bottom of this file):
# balances credited to the left-hand address are redirected to the
# right-hand address.
substitutions = dict([
    ("1Gaw39RvbkZxcXeYzGrjWvmiEqAB6PMqsX", "1A2SAL7i5UwZ3pYuaf7rcBj1U3wffEAoo7"),
    ("13U3XLUTRHLGMwfCmhde7EmQtNdJE7X2zw", "1A2SAL7i5UwZ3pYuaf7rcBj1U3wffEAoo7"),
    ("178RVtWSyrCD8N1BnSdiSbMMokD2foQhAd", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1GJczjbQF8evXsLCHpX9sNXQ3m2QbwH2Wv", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("19btdFcvEgF6t7hyr5n4gzsGitHZjk7uF4", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1J9FPXTMJXwh1mC4CYDp8kjtgsQehiVos4", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("18Cpgt8w1aFsGBc3s82NMCU5RiRE1kiEe3", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1MnEmUHF9TYXMio86dTGXvRxeirW4VFE9w", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1DL9Rt8bWZbmNbSMZedqA2WeFRUeJS415s", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1QC9jKo53vAdo6zYUcHwpJWqGwCe5voxLv", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1JtuJEr3cGL7AyB4xcg9qMLjWV73qnHRBt", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1G3HMZzCr4QDihJEpz1arrmR4swm7krinM", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1HziNRPCtiY6HXsBouwpVzTYtZws5A25LZ", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("135xcKg36YPGi2c1yDuacgDJuxqWcporDv", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("15MeKFbkdHp7357Ez3p1jnNdGSovoBKop6", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("12tiMQ2eFAsG5SwG1xYaejigbsXiqac6hx", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1GqU61rhQ6sbwLiTc4FYSNpKZyjEA827VV", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1ED1wLdA3WW7Xr3EPRhZH6LmWP7gwJP5Qj", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1PegsunSa7ThjbEoRHxxFa5M4BgfXjbAj1", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("14seLs1EwvsXTb3dLvchFgMquJnF2yJwd2", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1NhBbVJJUPN7X451bN9FEEq4LxyWLSWcft", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("12ZoxtSZVqApmRTmY75P6jYpYDLkPJzNai", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1NZHLx6jNqY3R3953j6oALgkkZF4VoM6rH", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("14Efv8ZHhbFz1fJ3eD4tfKLyHVqAXMug7y", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("12ahkRrYhBcRGT9GbGRc7Y8uMGe9WTLibF", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("1TDLLyd684EqFHHwCuQuh5Hh1aEBZpEhv", "12Zv4sdQbm6bKpz5artZYoM8WbfFXKuTHC"),
    ("16qGZVP5rgwQCeLSnf2U94dHBa3QMNHJ3F", "12Zv4sdQbm6bKpz5artZYoM8WbfFXKuTHC"),
    ("1FnqQz36y18HK1EvaTCaGS65xUYDSTxBtx", "12Zv4sdQbm6bKpz5artZYoM8WbfFXKuTHC"),
    ("1JyiMXW7NXjsyhp4hvsm77xFn3J4tvxuZa", "16fgrvY7ACrZDLNz9GWiwPfMuk34kZWKmf"),
    ("1AEv2pKdGqJomxE9ApkW68QBg32QjjwA7b", "1BYrChoJn2UNMhTtWfRrqfcJ5ntkMYjTo8"),
    ("1KgyqkYwq1uFMwc6MTeQKsz72jJfqHSBD9", "1KLNYniYHM2UwYxSA7rjRtXqNFQEEUDhPv"),
    ("1PUZQbeu94iarZtrkKXiL4ryAEsvcsEvcE", "13P4or5Dz8734Arqg2CQLXFtxy2DSjKdXa"),
    ("1CbkZbefm25BYFeUUUQGEvt9HYWh39hvJk", "1M69AMjAkeKkk6YfuEECZzbz54EnXijzvk"),
    ("13XnJ6zKd6qgK5Uu4zJw4bdPT8M7232ZBf", "1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb"),
    ("PbXSuic9B1iEmgMiWqW93cdXFvPQsHXdUc", "PfSQYEJYKN3YTmk74BwXy6fk2StTJczaQw"),
    ("PitE7xxJvjzkVcs6BT5yRxV55YJqgFrhCU", "PfSQYEJYKN3YTmk74BwXy6fk2StTJczaQw"),
    ("Pe9F7tmq8Wxd2bCkFrW6nc4h5RASAzHAWC", "PmBR2p6uYY1SKhB9FbdHMbSGcqjEGsfK2n"),
    ("1376AFc3gfic94o9yK1dx7JMMqxzfbssrg", "1gD8zEUgPN6imT3uGUqVVnxT5agAH9r4Y")
])
# All assets have precision 10**8
# These should add up to 500m * 10**8
bts_added = 0
bts_excluded = 0

# AGS and PTS - generated using snapshot tool.
with open("ags-pts-nov-5.json") as agspts:
    snapshot = json.load(agspts)
    agspts_total = 0
    for item in snapshot["balances"]:
        balance = int(item[1])
        # Keyhotee founders' stake is excluded from the sharedrop entirely.
        if item[0] == "KEYHOTEE_FOUNDERS":
            bts_excluded += balance
            continue
        if item[0] == "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
            balance -= 297806854702309
            # Dan adds another vested interest
            exodus_balances.append(["1CTicY4ESkTqD5n5kFVTeNees7cq7tboXe", 297806854702309])
            print "If this line gets printed twice, it means the PaNGEL address donated to AGS!"
        agspts_total += int(item[1])
        exodus_balances.append([item[0], balance])
        bts_added += int(item[1])

# VOTE
with open("vote-aug-21-noFMV.json") as vote:
    snapshot = json.load(vote)
    vote_total = 0
    for item in snapshot["balances"]:
        if item[0] == "KEYHOTEE_FOUNDERS":
            #print "removing keyhotee ags: " + str(item[1])
            bts_excluded += int(item[1])
            continue
        vote_total += int(item[1])
        exodus_balances.append(item)
        bts_added += int(item[1])
    #print "** vote-aug-21-noFMV.json total: " + str(vote_total)

# Follow my vote dev stake
with open("fmv-key.json") as fmvkey:
    items = json.load(fmvkey)
    for item in items:
        print item
        exodus_balances.append(item)
        bts_added += item[1]
        print "FMV allocation: " + str(item[1])
pangel_dns = 0 # PaNGEL address
dns_no_dev = 0
dns_scaled_total = 0
bts_to_bonus = 0

# DNS normal - remove pangel and dev funds
with open("dns-dev-fund.json") as devfund:
    devkeys = json.load(devfund)

with open("dns-nov-5.json") as dns:
    snapshot = json.load(dns)
    # First pass: total up eligible DNS (dev balances skipped; the PaNGEL
    # balance is tracked separately to fund the bonus pool below).
    for item in snapshot:
        if (item[0] in devkeys and item[1] != 0):
            if item[1] < 200000000 * 10**8:
                #pangel_dns += item[1]
                continue
            else:
                #print "skipping dev balance: " + str(item[1])
                continue
        if item[0] == "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
            pangel_dns += item[1]
        dns_no_dev += item[1]
    #print "dns no dev: " + str(dns_no_dev)
    # 75m BTS (at 10**8 precision) spread across eligible DNS.
    scale = 75000000.0 / dns_no_dev
    scale *= 10 ** 8
    print "BTS per DNS normal: " + str(scale / 1000)
    # Second pass: credit each eligible holder at the computed rate.
    for item in snapshot:
        if (item[0] in devkeys and item[1] != 0):
            #print "skipping dev balance: " + str(item[1])
            continue
        if item[0] == "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
            continue
        balance = int(scale * int(item[1]))
        bts_added += balance
        exodus_balances.append([item[0], balance])
    bts_to_bonus = int(scale * pangel_dns)
    print "bts to bonus: " + str(bts_to_bonus / (10**8))

# DNS extra - add pangel funds and exchange subsidy
with open("dns-collapse.json") as collapse:
    collapse = json.load(collapse)
    collapse_total = 0
    for item in collapse:
        if (item[0] in devkeys and item[1] != 0):
            continue
        if item[0] == "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
            continue
        collapse_total += int(item[1])
    # Two thirds of the bonus pool goes to exchanges, the rest on-chain.
    bts_for_exchanges = bts_to_bonus * 2/3 # 10000000 * 10**8
    print "bts for exchanges: " + str(bts_for_exchanges / 10**8)
    print "bts for on-chain: " + str((bts_to_bonus - bts_for_exchanges) / 10**8)
    with open("exchanges.json") as exchanges:
        exchanges = json.load(exchanges)
        ex_total = 0
        for item in exchanges:
            ex_total += item["proportion"]
        # Split the exchange subsidy pro-rata by each exchange's proportion.
        for item in exchanges:
            print item["name"]
            bts_to_exchange = int(bts_for_exchanges * 1.0 * item["proportion"] / ex_total)
            exodus_balances.append([item["address"], bts_to_exchange])
            bts_added += bts_to_exchange
            print bts_to_exchange
    scale = 1.0 * (bts_to_bonus - bts_for_exchanges) / collapse_total
    print "BTS per DNS for normal claimed balances " + str(scale / 1000)
    print "total claimed DNS outside of exchanges " + str(collapse_total / 10**5)
    for item in collapse:
        if item[0] in devkeys:
            continue
        if item[0] == "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
            continue
        balance = int(scale * int(item[1]))
        exodus_balances.append([item[0], balance])
        bts_added += balance
output = []
for item in exodus_balances:
if item[1] != 0:
address = item[0]
if address in substitutions.keys():
address = substitutions[address]
# ~ 1 in 600 million chance... not a coincidence
if address.startswith("PaNGEL") and address != "PaNGELmZgzRQCKeEKM6ifgTqNkC4ceiAWw":
bts_added -= item[1]
bts_excluded += item[1]
print "Removed from pangel scammer: " + str(item[1] / 10**5)
continue
obj = {
"raw_address": address,
"balance": item[1]
}
output.append(obj)
print "bts_added: " + str(bts_added)
print "bts_excluded: " + str(bts_excluded)
print "total reviewed: " + str(bts_added + bts_excluded)
with open("bts-sharedrop.json", "w") as sharedrop:
sharedrop.write(json.dumps(output, indent=4))
# coinbase substitutions - signatures confirmed by toast
# https://bitsharestalk.org/index.php?topic=9516.msg123741#msg123741
# Address: 178RVtWSyrCD8N1BnSdiSbMMokD2foQhAd
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: G4SbByvsoQhLVjyA3ZDwNUZW/cYO2pJK/HsaS2KgGajMUAQMZw+ZxuD82sRzq88l30fWVgn+gYoDs1uvE6gLPbY=
#
# Address: 1GJczjbQF8evXsLCHpX9sNXQ3m2QbwH2Wv
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: GwrBVRWHmbyhFIkD5aDRxPDIS6I1HubOZEAw+9OPHAEuRS3SH6Wy6Ma9ndKDLdmc/TF279ADDLxbYr6k1ucy8ao=
#
# Address: 19btdFcvEgF6t7hyr5n4gzsGitHZjk7uF4
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: HDdneKnMiA5dc9uKPHlPSSI53WBL0QQw43oRhKjLOePQfzPZN39gKEZObxg45Hb8MIyq6sEBO5vfY1vJaRokHjQ=
#
# Address: 1J9FPXTMJXwh1mC4CYDp8kjtgsQehiVos4
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: HIxFt5eGHnvLe5jgqxzP6ZuKCeVJn7hb4JgVLaFOZE4vSJNJ6qrD/ScIXny2pjOqLekdoGL11st+Jd+rmXADVQQ=
#
# Address: 18Cpgt8w1aFsGBc3s82NMCU5RiRE1kiEe3
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: HE2tfLu9bz5GQ3pNxYLmY/vIfytnblIgqmqxWKxCqRZRuMpsXra049k+vzmKU2bOcLzpZm0OKlaW+vPOA0bHe/k=
#
# Address: 1MnEmUHF9TYXMio86dTGXvRxeirW4VFE9w
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: G8jfexkOvKP9LLtT6pzrplIVUXja/eoWsToCFMC55uqv/w5np0A9P4ijBqLrd9lKMouwHl6jlIN+qlkBXnoVCXU=
#
# Address: 1DL9Rt8bWZbmNbSMZedqA2WeFRUeJS415s
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: G2gllIrPC6iWBB+LBHdMdPigOFC8yhShikKJwt4lv+Nwz+Ff7sQFpvaq4Z/1yui3ngnlKdi7JdkgE04WDTf5Mgs=
#
# Address: 1QC9jKo53vAdo6zYUcHwpJWqGwCe5voxLv
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: G8j7vZOXw6EJ4/bOXAwKQppyK9YxXSgMrRwdvwnNrJu7uRajHfYN346LWiFwz0pqLkA7+vI2xJ4D7GCFXcVDlMI=
#
# Address: 1JtuJEr3cGL7AyB4xcg9qMLjWV73qnHRBt
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: G7iO3R96/ojfmba3v9spFQEIDUYVwjd7fXpjmb7fw5uy4sOizXbyLXvOX5AUFPlLakKqDjKE5AftdC4XE8/eG+k=
#
# Address: 1G3HMZzCr4QDihJEpz1arrmR4swm7krinM
# Message: New BTC Address: 1KfjASdNiX97R8eJM9HPbnKcFWZ8RRhzzb
# Signature: HJk5aiPFINZ9+3yKe9h7yZPuPY6yLY05T27bBReEAYDr2jCHrmDhmFzkwe8/+dtnz
# more: https://github.com/bitsuperlab/bitshares_play/blob/develop/libraries/blockchain/pts_ags_snapshot/substitution.txt
# educatedwarrior theft - confirmed manually by BM
# ILuXgc0SsMz4vBni3kRBoAiWc6m2bJnUiqMpyRgNI8Zjf4n/ikFFuflV/cQv4p6PRuKxw6CwQ1mD3CC/EEki8Kw=
# "Bastard stole my PTS. Whip his ass."
# if address == "Pe9F7tmq8Wxd2bCkFrW6nc4h5RASAzHAWC":
# address = "PmBR2p6uYY1SKhB9FbdHMbSGcqjEGsfK2n"
# H0eoaUht5pgKeO0W6U+graUn2kkyTxybb5ttkdZ8BxOBamghu0vB/ZbBw4329LbzKIZIoH4QtTTLPAFBqmX6IR4=
# "Dan is the man with the plan!" - same person
# if address == "1376AFc3gfic94o9yK1dx7JMMqxzfbssrg":
# address = "1gD8zEUgPN6imT3uGUqVVnxT5agAH9r4Y"
| unlicense |
jindongh/kombu | kombu/pools.py | 38 | 3812 | """
kombu.pools
===========
Public resource pools.
"""
from __future__ import absolute_import
import os
from itertools import chain
from .connection import Resource
from .five import range, values
from .messaging import Producer
from .utils import EqualityDict
from .utils.functional import lazy
__all__ = ['ProducerPool', 'PoolGroup', 'register_group',
           'connections', 'producers', 'get_limit', 'set_limit', 'reset']

# Global pool limit, "a pool is in use" flag, and the registered groups.
# The first two are single-element lists so they can be mutated in place
# from the functions below.
_limit = [200]
_used = [False]
_groups = []
# Sentinel meaning "resolve to the current global limit" for a PoolGroup.
use_global_limit = object()
# Escape hatch: allow lowering the limit even while pools are in use.
disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION')
class ProducerPool(Resource):
    """Pool of producers, each lazily bound to a connection acquired from
    the given connection pool."""

    Producer = Producer

    def __init__(self, connections, *args, **kwargs):
        self.connections = connections
        # Allow a custom Producer class via keyword argument.
        self.Producer = kwargs.pop('Producer', None) or self.Producer
        super(ProducerPool, self).__init__(*args, **kwargs)

    def _acquire_connection(self):
        # Blocks until a connection is available from the connection pool.
        return self.connections.acquire(block=True)

    def create_producer(self):
        conn = self._acquire_connection()
        try:
            return self.Producer(conn)
        except BaseException:
            # Do not leak the connection if producer construction fails.
            conn.release()
            raise

    def new(self):
        # Defer actual producer creation until first use.
        return lazy(self.create_producer)

    def setup(self):
        # Pre-fill the resource queue with lazy producers up to the limit.
        if self.limit:
            for _ in range(self.limit):
                self._resource.put_nowait(self.new())

    def close_resource(self, resource):
        pass

    def prepare(self, p):
        # Evaluate the lazy producer, and (re)bind it to a live connection
        # if it has no channel yet.
        if callable(p):
            p = p()
        if p._channel is None:
            conn = self._acquire_connection()
            try:
                p.revive(conn)
            except BaseException:
                conn.release()
                raise
        return p

    def release(self, resource):
        # Return the underlying connection to its pool and detach the
        # channel before putting the producer back.
        if resource.__connection__:
            resource.__connection__.release()
        resource.channel = None
        super(ProducerPool, self).release(resource)
class PoolGroup(EqualityDict):
    """Dict of pools keyed by resource; a pool is created on first access."""

    def __init__(self, limit=None):
        self.limit = limit

    def create(self, resource, limit):
        """Subclasses must build and return a new pool for ``resource``."""
        raise NotImplementedError('PoolGroups must define ``create``')

    def __missing__(self, resource):
        # Resolve the sentinel to the global limit at pool-creation time.
        effective_limit = self.limit
        if effective_limit is use_global_limit:
            effective_limit = get_limit()
        # Mark that at least one pool exists (guards set_limit lowering).
        _used[0] = True
        pool = self.create(resource, effective_limit)
        self[resource] = pool
        return pool
def register_group(group):
    # Track the group so reset()/set_limit() can reach all of its pools;
    # returns the group for use as a decorator-style one-liner.
    _groups.append(group)
    return group
class Connections(PoolGroup):
    """Group of connection pools, one pool per distinct connection."""

    def create(self, connection, limit):
        return connection.Pool(limit=limit)
connections = register_group(Connections(limit=use_global_limit))


class Producers(PoolGroup):
    """Group of producer pools, backed by the connection pool group."""

    def create(self, connection, limit):
        return ProducerPool(connections[connection], limit=limit)
producers = register_group(Producers(limit=use_global_limit))
def _all_pools():
    """Iterate over every pool instance across all registered groups."""
    return chain.from_iterable(
        values(group) if group else iter([]) for group in _groups)
def get_limit():
    """Return the current global pool limit."""
    return _limit[0]
def set_limit(limit, force=False, reset_after=False):
    """Set the global pool limit, propagating it to every existing pool.

    Lowering the limit after pools are in use raises RuntimeError unless
    ``force`` is given or limit protection is disabled via the
    KOMBU_DISABLE_LIMIT_PROTECTION environment variable; in that case the
    pools are reset afterwards.
    """
    limit = limit or 0
    glimit = _limit[0] or 0
    if limit < glimit:
        if not disable_limit_protection and (_used[0] and not force):
            raise RuntimeError("Can't lower limit after pool in use.")
        # Lowering the limit requires closing existing pools afterwards.
        reset_after = True
    if limit != glimit:
        _limit[0] = limit
        for pool in _all_pools():
            pool.limit = limit
        if reset_after:
            reset()
    return limit
def reset(*args, **kwargs):
    """Close and forget all pools in all groups (also used after fork)."""
    for pool in _all_pools():
        try:
            pool.force_close_all()
        except Exception:
            # Best effort: a broken pool must not prevent resetting the rest.
            pass
    for group in _groups:
        group.clear()
    _used[0] = False
# Child processes must not reuse the parent's pooled connections, so reset
# all pools after a fork when multiprocessing is available.
try:
    from multiprocessing.util import register_after_fork
    register_after_fork(connections, reset)
except ImportError:  # pragma: no cover
    pass
| bsd-3-clause |
robhudson/django | django/contrib/auth/password_validation.py | 229 | 7041 | from __future__ import unicode_literals
import gzip
import os
import re
from difflib import SequenceMatcher
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.six import string_types
from django.utils.translation import ugettext as _
@lru_cache.lru_cache(maxsize=None)
def get_default_password_validators():
    # Memoized: builds the validators from settings.AUTH_PASSWORD_VALIDATORS
    # once per process.
    return get_password_validators(settings.AUTH_PASSWORD_VALIDATORS)
def get_password_validators(validator_config):
    """Instantiate and return the validators described by *validator_config*.

    Each entry must be a dict with a dotted-path 'NAME' and an optional
    'OPTIONS' dict of constructor keyword arguments.
    """
    instantiated = []
    for config in validator_config:
        path = config['NAME']
        try:
            validator_class = import_string(path)
        except ImportError:
            raise ImproperlyConfigured(
                "The module in NAME could not be imported: %s. "
                "Check your AUTH_PASSWORD_VALIDATORS setting." % path
            )
        options = config.get('OPTIONS', {})
        instantiated.append(validator_class(**options))
    return instantiated
def validate_password(password, user=None, password_validators=None):
    """
    Validate whether the password meets all validator requirements.

    If the password is valid, return ``None``.
    If the password is invalid, raise ValidationError with all error messages.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    failures = []
    for validator in password_validators:
        try:
            validator.validate(password, user)
        except ValidationError as error:
            failures.append(error)
    if failures:
        raise ValidationError(failures)
def password_changed(password, user=None, password_validators=None):
    """
    Inform all validators that have implemented a password_changed() method
    that the password has been changed.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    for validator in password_validators:
        # Validators without a password_changed() hook are silently skipped.
        notify = getattr(validator, 'password_changed', lambda *a: None)
        notify(password, user)
def password_validators_help_texts(password_validators=None):
    """
    Return a list of all help texts of all configured validators.

    When ``password_validators`` is None, the validators configured in
    ``AUTH_PASSWORD_VALIDATORS`` are used.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    # Idiomatic list comprehension replaces the manual append loop.
    return [validator.get_help_text() for validator in password_validators]
def password_validators_help_text_html(password_validators=None):
    """
    Return an HTML string with all help texts of all configured validators
    in an <ul>.
    """
    items = [
        format_html('<li>{}</li>', text)
        for text in password_validators_help_texts(password_validators)
    ]
    return '<ul>%s</ul>' % ''.join(items)
class MinimumLengthValidator(object):
    """
    Validate whether the password is of a minimum length.
    """
    def __init__(self, min_length=8):
        self.min_length = min_length

    def validate(self, password, user=None):
        # Guard clause: a sufficiently long password passes immediately.
        if len(password) >= self.min_length:
            return
        raise ValidationError(
            _("This password is too short. It must contain at least %(min_length)d characters."),
            code='password_too_short',
            params={'min_length': self.min_length},
        )

    def get_help_text(self):
        return _("Your password must contain at least %(min_length)d characters.") % {'min_length': self.min_length}
class UserAttributeSimilarityValidator(object):
    """
    Validate whether the password is sufficiently different from the user's
    attributes.

    If no specific attributes are provided, look at a sensible list of
    defaults. Attributes that don't exist are ignored. Comparison is made to
    not only the full attribute value, but also its components, so that, for
    example, a password is validated against either part of an email address,
    as well as the full address.
    """
    DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')

    def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7):
        self.user_attributes = user_attributes
        self.max_similarity = max_similarity

    def validate(self, password, user=None):
        if not user:
            return
        for attribute_name in self.user_attributes:
            value = getattr(user, attribute_name, None)
            if not value or not isinstance(value, string_types):
                continue
            # Raw string literal: '\W' in a plain string is an invalid escape
            # sequence (DeprecationWarning on Python 3.6+, error later).
            value_parts = re.split(r'\W+', value) + [value]
            for value_part in value_parts:
                if SequenceMatcher(a=password.lower(), b=value_part.lower()).quick_ratio() > self.max_similarity:
                    verbose_name = force_text(user._meta.get_field(attribute_name).verbose_name)
                    raise ValidationError(
                        _("The password is too similar to the %(verbose_name)s."),
                        code='password_too_similar',
                        params={'verbose_name': verbose_name},
                    )

    def get_help_text(self):
        return _("Your password can't be too similar to your other personal information.")
class CommonPasswordValidator(object):
    """
    Validate whether the password is a common password.

    The password is rejected if it occurs in a provided list, which may be gzipped.
    The list Django ships with contains 1000 common passwords, created by Mark Burnett:
    https://xato.net/passwords/more-top-worst-passwords/
    """
    DEFAULT_PASSWORD_LIST_PATH = os.path.join(
        os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords.txt.gz'
    )

    def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
        try:
            # Close the gzip handle deterministically instead of leaking it
            # until garbage collection.
            with gzip.open(password_list_path) as gzipped:
                common_passwords_lines = gzipped.read().decode('utf-8').splitlines()
        except IOError:
            # Not a gzip file (or unreadable as one): fall back to plain text.
            with open(password_list_path) as f:
                common_passwords_lines = f.readlines()
        self.passwords = {p.strip() for p in common_passwords_lines}

    def validate(self, password, user=None):
        if password.lower().strip() in self.passwords:
            raise ValidationError(
                _("This password is too common."),
                code='password_too_common',
            )

    def get_help_text(self):
        return _("Your password can't be a commonly used password.")
class NumericPasswordValidator(object):
    """
    Validate whether the password is alphanumeric.
    """
    def validate(self, password, user=None):
        # Anything containing a non-digit character is acceptable here.
        if not password.isdigit():
            return
        raise ValidationError(
            _("This password is entirely numeric."),
            code='password_entirely_numeric',
        )

    def get_help_text(self):
        return _("Your password can't be entirely numeric.")
| bsd-3-clause |
marratj/ansible | test/units/playbook/test_playbook.py | 119 | 2239 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars.manager import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
    """Unit tests for loading playbooks via Playbook / Playbook.load."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_empty_playbook(self):
        # A Playbook constructed without any file should initialize cleanly.
        fake_loader = DictDataLoader({})
        p = Playbook(loader=fake_loader)

    def test_basic_playbook(self):
        # A minimal one-play playbook should load and expose its plays.
        fake_loader = DictDataLoader({
            "test_file.yml": """
            - hosts: all
            """,
        })
        p = Playbook.load("test_file.yml", loader=fake_loader)
        plays = p.get_plays()

    def test_bad_playbook_files(self):
        # Malformed playbook documents must raise AnsibleParserError on load.
        fake_loader = DictDataLoader({
            # represents a playbook which is not a list of plays
            "bad_list.yml": """
            foo: bar
            """,
            # represents a playbook where a play entry is mis-formatted
            "bad_entry.yml": """
            -
            - "This should be a mapping..."
            """,
        })
        vm = VariableManager()
        self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
        self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
| gpl-3.0 |
kose-y/pylearn2 | pylearn2/utils/mnist_ubyte.py | 44 | 3963 | """
Low-level utilities for reading in raw MNIST files.
"""
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__email__ = "wardefar@iro"
__maintainer__ = "David Warde-Farley"
import struct
import numpy
from theano.compat import six
# Magic numbers at the start of the big-endian MNIST "idx" ubyte files
# (2051 for image files, 2049 for label files).
MNIST_IMAGE_MAGIC = 2051
MNIST_LABEL_MAGIC = 2049
class open_if_filename(object):
    """Context manager yielding an open file for *f*.

    If *f* is a path string it is opened with the given mode and buffering
    and closed again on exit; if it is already a file-like object it is
    passed through unchanged and left open.

    Parameters
    ----------
    f : str or file-like
        Path to open, or an already-open file object.
    mode : str
        Mode passed to `open` when *f* is a path.
    buffering : int
        Buffering argument passed to `open` when *f* is a path.
    """

    def __init__(self, f, mode='r', buffering=-1):
        self._f = f
        self._mode = mode
        self._buffering = buffering
        self._handle = None

    def __enter__(self):
        """Open *f* if it is a path, then return the file handle."""
        self._handle = (open(self._f, self._mode, self._buffering)
                        if isinstance(self._f, six.string_types)
                        else self._f)
        return self._handle

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the handle only if this context manager opened it."""
        if self._handle is not self._f:
            self._handle.close()
def read_mnist_images(fn, dtype=None):
    """
    Read MNIST images from the original ubyte file format.

    Parameters
    ----------
    fn : str or object
        Filename/path from which to read images, or an open file
        object for the same (will not be closed for you).

    dtype : str or object, optional
        A NumPy dtype or string that can be converted to one.
        If unspecified, images will be returned in their original
        unsigned byte format.

    Returns
    -------
    images : ndarray, shape (n_images, n_rows, n_cols)
        An image array, with individual examples indexed along the
        first axis and the image dimensions along the second and
        third axis.

    Notes
    -----
    If the dtype provided was boolean, the resulting array will
    be boolean with `True` if the corresponding pixel had a value
    greater than or equal to 128, `False` otherwise.

    If the dtype provided was a float or complex dtype, the values
    will be mapped to the unit interval [0, 1], with pixel values
    that were 255 in the original unsigned byte representation
    equal to 1.0.
    """
    with open_if_filename(fn, 'rb') as f:
        magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
        if magic != MNIST_IMAGE_MAGIC:
            # str(fn): a file-object argument would otherwise raise a
            # TypeError from str + non-str concatenation, masking the error.
            raise ValueError('wrong magic number reading MNIST image file: ' +
                             str(fn))
        array = numpy.fromfile(f, dtype='uint8').reshape((number, rows, cols))
    if dtype:
        dtype = numpy.dtype(dtype)
        # If the user wants booleans, threshold at half the range.
        # Compare the kind with ==, not `is`: identity comparison of strings
        # only works by CPython interning accident (SyntaxWarning on 3.8+).
        if dtype.kind == 'b':
            array = array >= 128
        else:
            # Otherwise, just convert.
            array = array.astype(dtype)
            # I don't know why you'd ever turn MNIST into complex,
            # but just in case, check for float *or* complex dtypes.
            # Either way, map to the unit interval.
            if dtype.kind in ('f', 'c'):
                array /= 255.
    return array
def read_mnist_labels(fn):
    """
    Read MNIST labels from the original ubyte file format.

    Parameters
    ----------
    fn : str or object
        Filename/path from which to read labels, or an open file
        object for the same (will not be closed for you).

    Returns
    -------
    labels : ndarray, shape (nlabels,)
        A one-dimensional unsigned byte array containing the
        labels as integers.
    """
    with open_if_filename(fn, 'rb') as f:
        header = f.read(8)
        magic, number = struct.unpack('>ii', header)
        if magic != MNIST_LABEL_MAGIC:
            raise ValueError('wrong magic number reading MNIST label file: ' +
                             fn)
        labels = numpy.fromfile(f, dtype='uint8')
    return labels
| bsd-3-clause |
Phonebooth/depot_tools | third_party/boto/cacerts/__init__.py | 260 | 1097 | # Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| bsd-3-clause |
bealdav/OpenUpgrade | addons/l10n_at/account_wizard.py | 379 | 1234 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp import addons
class AccountWizard_cd(osv.osv_memory):
    """Austrian localisation override of the multi-chart accounts wizard."""
    _inherit='wizard.multi.charts.accounts'
    _defaults = {
        # code_digits defaulted to 0 for the Austrian chart of accounts.
        # NOTE(review): presumably 0 means "use template code lengths as-is"
        # (no zero-padding) -- confirm against the wizard's semantics.
        'code_digits' : 0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rhndg/openedx | common/djangoapps/config_models/models.py | 75 | 7291 | """
Django Model baseclass for database-backed configuration.
"""
from django.db import connection, models
from django.contrib.auth.models import User
from django.core.cache import get_cache, InvalidCacheBackendError
from django.utils.translation import ugettext_lazy as _
try:
cache = get_cache('configuration') # pylint: disable=invalid-name
except InvalidCacheBackendError:
from django.core.cache import cache
class ConfigurationModelManager(models.Manager):
    """
    Query manager for ConfigurationModel
    """
    def _current_ids_subquery(self):
        """
        Internal helper method to return an SQL string that will get the IDs of
        all the current entries (i.e. the most recent entry for each unique set
        of key values). Only useful if KEY_FIELDS is set.
        """
        key_fields_escaped = [connection.ops.quote_name(name) for name in self.model.KEY_FIELDS]
        # The following assumes that the rows with the most recent date also have the highest IDs
        return "SELECT MAX(id) FROM {table_name} GROUP BY {key_fields}".format(
            key_fields=', '.join(key_fields_escaped),
            table_name=self.model._meta.db_table  # pylint: disable=protected-access
        )

    def current_set(self):
        """
        A queryset for the active configuration entries only. Only useful if KEY_FIELDS is set.

        Active means the most recent entry for each unique combination of keys. It does not
        necessarily mean enabled.
        """
        assert self.model.KEY_FIELDS != (), "Just use model.current() if there are no KEY_FIELDS"
        return self.get_query_set().extra(
            where=["id IN ({subquery})".format(subquery=self._current_ids_subquery())],
            select={'is_active': 1},  # This annotation is used by the admin changelist. sqlite requires '1', not 'True'
        )

    def with_active_flag(self):
        """
        A query set where each result is annotated with an 'is_active' field that indicates
        if it's the most recent entry for that combination of keys.
        """
        if self.model.KEY_FIELDS:
            # Keyed model: a row is active when its id appears in the
            # per-key-combination MAX(id) subquery.
            subquery = self._current_ids_subquery()
            return self.get_query_set().extra(
                select={'is_active': "id IN ({subquery})".format(subquery=subquery)}
            )
        else:
            # Unkeyed model: only the single current() row is active.
            return self.get_query_set().extra(
                select={'is_active': "id = {pk}".format(pk=self.model.current().pk)}
            )
class ConfigurationModel(models.Model):
    """
    Abstract base class for model-based configuration

    Properties:
        cache_timeout (int): The number of seconds that this configuration
            should be cached
    """
    class Meta(object):  # pylint: disable=missing-docstring
        abstract = True
        ordering = ("-change_date", )

    objects = ConfigurationModelManager()

    # Subclasses may set a tuple of field names; each unique combination of
    # those fields then keys an independent configuration history.
    KEY_FIELDS = ()

    # The number of seconds a current() value may be served from cache
    # before the database is consulted again.
    cache_timeout = 600

    change_date = models.DateTimeField(auto_now_add=True, verbose_name=_("Change date"))
    changed_by = models.ForeignKey(
        User,
        editable=False,
        null=True,
        on_delete=models.PROTECT,
        # Translators: this label indicates the name of the user who made this change:
        verbose_name=_("Changed by"),
    )
    enabled = models.BooleanField(default=False, verbose_name=_("Enabled"))

    def save(self, *args, **kwargs):
        """
        Clear the cached value when saving a new configuration entry
        """
        super(ConfigurationModel, self).save(*args, **kwargs)
        # Invalidate the cached current() entry for this key combination and,
        # for keyed models, the cached list of known key combinations.
        cache.delete(self.cache_key_name(*[getattr(self, key) for key in self.KEY_FIELDS]))
        if self.KEY_FIELDS:
            cache.delete(self.key_values_cache_key_name())

    @classmethod
    def cache_key_name(cls, *args):
        """Return the name of the key to use to cache the current configuration"""
        if cls.KEY_FIELDS != ():
            if len(args) != len(cls.KEY_FIELDS):
                raise TypeError(
                    "cache_key_name() takes exactly {} arguments ({} given)".format(len(cls.KEY_FIELDS), len(args))
                )
            return u'configuration/{}/current/{}'.format(cls.__name__, u','.join(unicode(arg) for arg in args))
        else:
            return 'configuration/{}/current'.format(cls.__name__)

    @classmethod
    def current(cls, *args):
        """
        Return the active configuration entry, either from cache,
        from the database, or by creating a new empty entry (which is not
        persisted).
        """
        cached = cache.get(cls.cache_key_name(*args))
        if cached is not None:
            return cached
        key_dict = dict(zip(cls.KEY_FIELDS, args))
        try:
            # The most recent row for this key combination is the active one.
            current = cls.objects.filter(**key_dict).order_by('-change_date')[0]
        except IndexError:
            # No entry yet: fall back to an unsaved instance with defaults.
            current = cls(**key_dict)
        cache.set(cls.cache_key_name(*args), current, cls.cache_timeout)
        return current

    @classmethod
    def is_enabled(cls):
        """Returns True if this feature is configured as enabled, else False."""
        return cls.current().enabled

    @classmethod
    def key_values_cache_key_name(cls, *key_fields):
        """ Key for fetching unique key values from the cache """
        key_fields = key_fields or cls.KEY_FIELDS
        return 'configuration/{}/key_values/{}'.format(cls.__name__, ','.join(key_fields))

    @classmethod
    def key_values(cls, *key_fields, **kwargs):
        """
        Get the set of unique values in the configuration table for the given
        key[s]. Calling cls.current(*value) for each value in the resulting
        list should always produce an entry, though any such entry may have
        enabled=False.

        Arguments:
            key_fields: The positional arguments are the KEY_FIELDS to return. For example if
                you had a course embargo configuration where each entry was keyed on (country,
                course), then you might want to know "What countries have embargoes configured?"
                with cls.key_values('country'), or "Which courses have country restrictions?"
                with cls.key_values('course'). You can also leave this unspecified for the
                default, which returns the distinct combinations of all keys.
            flat: If you pass flat=True as a kwarg, it has the same effect as in Django's
                'values_list' method: Instead of returning a list of lists, you'll get one list
                of values. This makes sense to use whenever there is only one key being queried.

        Return value:
            List of lists of each combination of keys found in the database.
            e.g. [("Italy", "course-v1:SomeX+some+2015"), ...] for the course embargo example
        """
        flat = kwargs.pop('flat', False)
        assert not kwargs, "'flat' is the only kwarg accepted"
        key_fields = key_fields or cls.KEY_FIELDS
        cache_key = cls.key_values_cache_key_name(*key_fields)
        cached = cache.get(cache_key)
        if cached is not None:
            return cached
        values = list(cls.objects.values_list(*key_fields, flat=flat).order_by().distinct())
        cache.set(cache_key, values, cls.cache_timeout)
        return values
| agpl-3.0 |
KevinMidboe/statusHandler | flask/lib/python3.4/encodings/cp852.py | 272 | 35002 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp852 codec backed by the generated charmap tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp852 encoder (charmap encoding keeps no state)."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp852 decoder (charmap decoding keeps no state)."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
# Plain charmap codec: the stream classes need no behaviour beyond Codec's.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by the codecs registry for 'cp852'."""
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
sliz1/servo | tests/wpt/css-tests/tools/webdriver/webdriver/command.py | 258 | 3985 | """Dispatches requests to remote WebDriver endpoint."""
import exceptions
import httplib
import json
import urlparse
import webelement
class CommandExecutor(object):
    """Dispatches requests to remote WebDriver endpoint."""

    # Headers sent with every request; the remote end speaks UTF-8 JSON
    # over HTTP and each exchange uses a fresh connection.
    _HEADERS = {
        "User-Agent": "Python WebDriver Local End",
        "Content-Type": "application/json;charset=\"UTF-8\"",
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "Accept-Encoding": "identity",
        "Connection": "close",
    }

    def __init__(self, url, mode='strict'):
        """Prepare an HTTP connection to the WebDriver server at *url*.

        mode -- 'strict' for the spec-style wire format, or
                'compatibility' for the legacy JSON wire protocol.
        """
        self._parsed_url = urlparse.urlparse(url)
        self._conn = httplib.HTTPConnection(self._parsed_url.hostname,
                                            self._parsed_url.port)
        self._mode = mode

    def execute(self,
                method,
                path,
                session_id,
                name,
                parameters=None,
                object_hook=None):
        """Execute a command against the WebDriver endpoint.

        Arguments:
        method -- one of GET, POST, DELETE
        path -- the path of the url endpoint (needs to include
            session/<sessionId> if needed)
        session_id -- the sessionId to include in the JSON body
        name -- name of the command that is being executed to include in
            the JSON body
        parameters -- the JSON body to send with the command. Only used if
            method is POST
        object_hook -- function used by json.loads to properly deserialize
            objects in the request
        """
        dispatch = {
            'strict': self._execute_strict,
            'compatibility': self._execute_compatibility,
        }
        try:
            handler = dispatch[self._mode]
        except KeyError:
            raise Exception("Unknown mode: " + self._mode)
        return handler(method, path, session_id, name, parameters,
                       object_hook)

    def _execute_compatibility(self, method, path, session_id, name,
                               parameters, object_hook):
        """Send a command using the legacy JSON wire protocol."""
        payload = {'sessionId': session_id, 'name': name}
        if parameters:
            payload.update(parameters)
        self._conn.request(
            method,
            self._parsed_url.path + path,
            json.dumps(payload, default=self._json_encode).encode('utf-8'),
            self._HEADERS)
        response = self._conn.getresponse()
        raw = response.read().decode('utf-8')
        if raw:
            result = json.loads(raw, object_hook=object_hook)
            # Legacy protocol reports success as numeric status 0.
            if result['status'] != 0:
                raise exceptions.create_webdriver_exception_compatibility(
                    result['status'], result['value']['message'])
            return result
        # Empty body: fall back to the HTTP status line for error reporting.
        if not (200 <= response.status <= 299):
            raise exceptions.create_webdriver_exception_compatibility(
                response.status, response.reason)

    def _execute_strict(self, method, path, session_id, name,
                        parameters, object_hook):
        """Send a command using the strict (spec-style) wire format."""
        payload = {
            'sessionId': session_id,
            'name': name,
            'parameters': parameters}
        self._conn.request(
            method,
            self._parsed_url.path + path,
            json.dumps(payload, default=self._json_encode).encode('utf-8'),
            self._HEADERS)
        response = self._conn.getresponse()
        result = json.loads(
            response.read().decode('utf-8'), object_hook=object_hook)
        # Strict protocol reports success as the literal string 'success'.
        if result['status'] != 'success':
            raise exceptions.create_webdriver_exception_strict(
                result['status'], result['value'])
        return result

    def _json_encode(self, obj):
        """json.dumps fallback: delegate serialization to the object."""
        return obj.to_json()
| mpl-2.0 |
cognoma/task-service | api/queue.py | 1 | 1935 | from django.db import connection
from api.models import TaskDef, Task
# SQL that atomically claims runnable tasks. Parameters, in order:
#   1. array of task_def names to consider
#   2. LIMIT (max tasks to claim)
#   3. worker_id to stamp on the claimed rows
# FOR UPDATE SKIP LOCKED lets concurrent workers claim disjoint rows
# without blocking each other.
#
# Bug fix: the started_at CASE previously compared with "= null" (always
# NULL, never true) and its ELSE branch was "null", so started_at was
# reset to NULL on every claim. It now uses IS NULL and preserves the
# existing started_at once set.
get_task_sql = """
WITH nextTasks as (
    SELECT id, status, started_at
    FROM tasks
    JOIN task_defs
        ON tasks.task_def_name = task_defs.name
    WHERE
        task_def_name = ANY(%s)
        AND run_at <= NOW()
        AND (status = 'queued' OR
             (status = 'in_progress' AND
              (NOW() > (locked_at + INTERVAL '1 second' * task_defs.default_timeout))) OR
             (status = 'failed_retrying' AND
              attempts < task_defs.max_attempts))
    ORDER BY
        CASE WHEN priority = 'critical'
                THEN 1
             WHEN priority = 'high'
                THEN 2
             WHEN priority = 'normal'
                THEN 3
             WHEN priority = 'low'
                THEN 4
        END,
        run_at
    LIMIT %s
    FOR UPDATE SKIP LOCKED
)
UPDATE tasks SET
    status = 'in_progress',
    worker_id = %s,
    locked_at = NOW(),
    started_at =
        CASE WHEN nextTasks.started_at IS NULL
            THEN NOW()
            ELSE nextTasks.started_at
        END,
    attempts =
        CASE WHEN nextTasks.status = 'in_progress'
            THEN attempts
            ELSE attempts + 1
        END
FROM nextTasks
WHERE tasks.id = nextTasks.id
RETURNING tasks.*;
"""
def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    column_names = [column[0] for column in cursor.description]
    rows = []
    for record in cursor.fetchall():
        rows.append(dict(zip(column_names, record)))
    return rows
def get_tasks(task_names, worker_id, limit=1):
    """Claim up to *limit* runnable tasks for *worker_id*.

    Runs the locking claim SQL and wraps each returned row in a Task
    model instance with a name-only TaskDef attached.
    """
    with connection.cursor() as cursor:
        cursor.execute(get_task_sql, [task_names, limit, worker_id])
        claimed_rows = dictfetchall(cursor)

    tasks = []
    for row in claimed_rows:
        # Replace the raw FK column with a TaskDef instance so the row
        # dict matches Task's constructor keyword arguments.
        row['task_def'] = TaskDef(name=row.pop('task_def_name'))
        tasks.append(Task(**row))
    return tasks
| bsd-3-clause |
mrunge/openstack_horizon | openstack_horizon/dashboards/project/data_processing/jobs/tabs.py | 1 | 1174 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon_lib import tabs
from openstack_horizon.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
    """Tab displaying general information for a single data-processing job."""

    name = _("General Info")
    slug = "job_details_tab"
    template_name = ("project/data_processing.jobs/_details.html")

    def get_context_data(self, request):
        """Fetch the job identified by the tab group's 'job_id' kwarg."""
        job = saharaclient.job_get(request, self.tab_group.kwargs['job_id'])
        return {"job": job}
class JobDetailsTabs(tabs.TabGroup):
    """Tab group shown on the job details page; currently only GeneralTab."""
    slug = "job_details"
    tabs = (GeneralTab,)
    # sticky: horizon TabGroup option — presumably keeps the selected tab
    # active across requests; confirm against horizon's TabGroup docs.
    sticky = True
ifduyue/sentry | src/sentry/south_migrations/0029_auto__del_field_projectmember_is_superuser__del_field_projectmember_pe.py | 5 | 17645 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Replace ProjectMember's is_superuser/permissions columns with a
        single integer 'type' column (default 0)."""
        # Deleting field 'ProjectMember.is_superuser'
        db.delete_column('sentry_projectmember', 'is_superuser')

        # Deleting field 'ProjectMember.permissions'
        db.delete_column('sentry_projectmember', 'permissions')

        # Adding field 'ProjectMember.type'
        db.add_column(
            'sentry_projectmember',
            'type',
            self.gf('django.db.models.fields.IntegerField')(default=0),
            keep_default=False
        )
    def backwards(self, orm):
        """Partial reversal: re-adds is_superuser, then aborts because the
        dropped 'permissions' values cannot be reconstructed."""
        # Adding field 'ProjectMember.is_superuser'
        db.add_column(
            'sentry_projectmember',
            'is_superuser',
            self.gf('django.db.models.fields.BooleanField')(default=False),
            keep_default=False
        )

        # User chose to not deal with backwards NULL issues for 'ProjectMember.permissions'
        raise RuntimeError(
            "Cannot reverse this migration. 'ProjectMember.permissions' and its values cannot be restored."
        )

        # NOTE: unreachable — the RuntimeError above always fires; South
        # keeps the generated call below for reference only.
        # Deleting field 'ProjectMember.type'
        db.delete_column('sentry_projectmember', 'type')
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.event': {
'Meta': {
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectdomain': {
'Meta': {
'unique_together': "(('project', 'domain'),)",
'object_name': 'ProjectDomain'
},
'domain': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'domain_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.projectmember': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'ProjectMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'project_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoptions': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'ProjectOptions'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
Santinell/ansible-modules-core | cloud/rackspace/rax_dns_record.py | 136 | 10840 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
                       name=None, server=None, state='present', ttl=7200):
    """Create, update or delete a reverse-DNS (PTR) record.

    PTR records are attached to a device (cloud server or load balancer)
    rather than to a domain, so exactly one of *loadbalancer* / *server*
    must be supplied (main() enforces this before calling here).  The
    function terminates the Ansible module via module.exit_json() or
    module.fail_json(); it never returns to the caller.
    """
    changed = False
    results = []

    dns = pyrax.cloud_dns
    # pyrax leaves the service set to None (rather than raising) when the
    # client could not be built, typically due to a bad/mis-cased region.
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Resolve the device the PTR record hangs off.
    if loadbalancer:
        item = rax_find_loadbalancer(module, pyrax, loadbalancer)
    elif server:
        item = rax_find_server(module, pyrax, server)

    if state == 'present':
        current = dns.list_ptr_records(item)
        for record in current:
            # PTR records are matched on their data field (the IP address).
            if record.data == data:
                if record.ttl != ttl or record.name != name:
                    try:
                        dns.update_ptr_record(item, record, name, data, ttl)
                        changed = True
                    except Exception, e:
                        module.fail_json(msg='%s' % e.message)
                    # Mirror the change on the local object; pyrax does not
                    # refresh it for us.
                    record.ttl = ttl
                    record.name = name
                    results.append(rax_to_dict(record))
                    break
                else:
                    # Record already matches the requested state.
                    results.append(rax_to_dict(record))
                    break

        if not results:
            # No existing PTR record matched the IP: create a new one.
            record = dict(name=name, type='PTR', data=data, ttl=ttl,
                          comment=comment)
            try:
                results = dns.add_ptr_records(item, [record])
                changed = True
            except Exception, e:
                module.fail_json(msg='%s' % e.message)

        module.exit_json(changed=changed, records=results)

    elif state == 'absent':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                results.append(rax_to_dict(record))
                break

        if results:
            try:
                dns.delete_ptr_records(item, data)
                changed = True
            except Exception, e:
                module.fail_json(msg='%s' % e.message)

        module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
priority=None, record_type='A', state='present', ttl=7200):
"""Function for manipulating record types other than PTR"""
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
module.fail_json(msg='A "priority" attribute is required for '
'creating a MX or SRV record')
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name)
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
except pyrax.exceptions.DomainRecordNotFound, e:
try:
record_data = {
'type': record_type,
'name': name,
'data': data,
'ttl': ttl
}
if comment:
record_data.update(dict(comment=comment))
if priority and record_type.upper() in ['MX', 'SRV']:
record_data.update(dict(priority=priority))
record = domain.add_records([record_data])[0]
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(record, 'comment', None):
update['comment'] = comment
if ttl != getattr(record, 'ttl', None):
update['ttl'] = ttl
if priority != getattr(record, 'priority', None):
update['priority'] = priority
if data != getattr(record, 'data', None):
update['data'] = data
if update:
try:
record.update(**update)
changed = True
record.get()
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound, e:
record = {}
pass
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
if record:
try:
record.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
    """Ansible entry point: validate arguments and dispatch to the PTR or
    forward-record handler."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            data=dict(required=True),
            domain=dict(),
            loadbalancer=dict(),
            name=dict(required=True),
            priority=dict(type='int'),
            server=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
                                              'SRV', 'TXT', 'PTR'])
        )
    )

    # 'server', 'loadbalancer' and 'domain' are alternatives: exactly one
    # of the three must be given, never more.
    exclusive = [
        ['server', 'loadbalancer', 'domain'],
    ]
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        mutually_exclusive=exclusive,
        required_one_of=exclusive,
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    comment = params.get('comment')
    data = params.get('data')
    domain = params.get('domain')
    loadbalancer = params.get('loadbalancer')
    name = params.get('name')
    priority = params.get('priority')
    server = params.get('server')
    state = params.get('state')
    ttl = params.get('ttl')
    record_type = params.get('type')

    setup_rax_module(module, pyrax, False)

    if record_type.upper() != 'PTR':
        rax_dns_record(module, comment=comment, data=data, domain=domain,
                       name=name, priority=priority, record_type=record_type,
                       state=state, ttl=ttl)
    else:
        # PTR records live on a device, not a domain.
        if not (server or loadbalancer):
            module.fail_json(msg='one of the following is required: '
                             'server,loadbalancer')
        rax_dns_record_ptr(module, data=data, comment=comment,
                           loadbalancer=loadbalancer, name=name, server=server,
                           state=state, ttl=ttl)
# import module snippets -- Ansible 1.x convention: these star-imports sit at
# the bottom because the module loader textually appends the snippet contents
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

### invoke the module
main()
| gpl-3.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/encodings/cp775.py | 593 | 34732 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP775 codec backed by the charmap tables in this module."""

    def encode(self, input, errors='strict'):
        """Encode unicode *input* to CP775 bytes; *errors* picks the policy."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Decode CP775 bytes back to unicode via the decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Chunk-wise CP775 encoder; charmap codecs keep no state between calls."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length_consumed); only the bytes
        # are part of the incremental-encoder contract.
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Chunk-wise CP775 decoder; charmap codecs keep no state between calls."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length_consumed); only the text
        # is part of the incremental-decoder contract.
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """File-like CP775 writer: Codec.encode via the StreamWriter machinery."""
class StreamReader(Codec, codecs.StreamReader):
    """File-like CP775 reader: Codec.decode via the StreamReader machinery."""
### encodings module API
def getregentry():
    """Build the codecs.CodecInfo record that registers this codec as 'cp775'.

    Codec carries no per-instance state, so a single instance can safely
    supply both the encode and decode entry points.
    """
    stateless = Codec()
    return codecs.CodecInfo(
        name='cp775',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Machine-generated by gencodec.py from the Unicode CP775 mapping table.
# Start from the identity mapping for all 256 byte values, then override
# the upper half (0x80-0xFF) with the CP775 (Baltic Rim) code points.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
    0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
    0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
    0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
    0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
    0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
    0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
    0x0096: 0x00a2, # CENT SIGN
    0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
    0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x00d7, # MULTIPLICATION SIGN
    0x009f: 0x00a4, # CURRENCY SIGN
    0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
    0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
    0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
    0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
    0x00a7: 0x00a6, # BROKEN BAR
    0x00a8: 0x00a9, # COPYRIGHT SIGN
    0x00a9: 0x00ae, # REGISTERED SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
    0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
    0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
    0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
    0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
    0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
    0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
    0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
    0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
    0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
    0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
    0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
    0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
    0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
    0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
    0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
    0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
    0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
    0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
    0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
    0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
    0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
    0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
    0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
    0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
    0x00f0: 0x00ad, # SOFT HYPHEN
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
    0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
    0x00f4: 0x00b6, # PILCROW SIGN
    0x00f5: 0x00a7, # SECTION SIGN
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x00b9, # SUPERSCRIPT ONE
    0x00fc: 0x00b3, # SUPERSCRIPT THREE
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Machine-generated by gencodec.py: index = CP775 byte value (0x00-0xFF),
# entry = the corresponding unicode character.
decoding_table = (
    u'\x00'     # 0x0000 -> NULL
    u'\x01'     # 0x0001 -> START OF HEADING
    u'\x02'     # 0x0002 -> START OF TEXT
    u'\x03'     # 0x0003 -> END OF TEXT
    u'\x04'     # 0x0004 -> END OF TRANSMISSION
    u'\x05'     # 0x0005 -> ENQUIRY
    u'\x06'     # 0x0006 -> ACKNOWLEDGE
    u'\x07'     # 0x0007 -> BELL
    u'\x08'     # 0x0008 -> BACKSPACE
    u'\t'       # 0x0009 -> HORIZONTAL TABULATION
    u'\n'       # 0x000a -> LINE FEED
    u'\x0b'     # 0x000b -> VERTICAL TABULATION
    u'\x0c'     # 0x000c -> FORM FEED
    u'\r'       # 0x000d -> CARRIAGE RETURN
    u'\x0e'     # 0x000e -> SHIFT OUT
    u'\x0f'     # 0x000f -> SHIFT IN
    u'\x10'     # 0x0010 -> DATA LINK ESCAPE
    u'\x11'     # 0x0011 -> DEVICE CONTROL ONE
    u'\x12'     # 0x0012 -> DEVICE CONTROL TWO
    u'\x13'     # 0x0013 -> DEVICE CONTROL THREE
    u'\x14'     # 0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     # 0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     # 0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     # 0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     # 0x0018 -> CANCEL
    u'\x19'     # 0x0019 -> END OF MEDIUM
    u'\x1a'     # 0x001a -> SUBSTITUTE
    u'\x1b'     # 0x001b -> ESCAPE
    u'\x1c'     # 0x001c -> FILE SEPARATOR
    u'\x1d'     # 0x001d -> GROUP SEPARATOR
    u'\x1e'     # 0x001e -> RECORD SEPARATOR
    u'\x1f'     # 0x001f -> UNIT SEPARATOR
    u' '        # 0x0020 -> SPACE
    u'!'        # 0x0021 -> EXCLAMATION MARK
    u'"'        # 0x0022 -> QUOTATION MARK
    u'#'        # 0x0023 -> NUMBER SIGN
    u'$'        # 0x0024 -> DOLLAR SIGN
    u'%'        # 0x0025 -> PERCENT SIGN
    u'&'        # 0x0026 -> AMPERSAND
    u"'"        # 0x0027 -> APOSTROPHE
    u'('        # 0x0028 -> LEFT PARENTHESIS
    u')'        # 0x0029 -> RIGHT PARENTHESIS
    u'*'        # 0x002a -> ASTERISK
    u'+'        # 0x002b -> PLUS SIGN
    u','        # 0x002c -> COMMA
    u'-'        # 0x002d -> HYPHEN-MINUS
    u'.'        # 0x002e -> FULL STOP
    u'/'        # 0x002f -> SOLIDUS
    u'0'        # 0x0030 -> DIGIT ZERO
    u'1'        # 0x0031 -> DIGIT ONE
    u'2'        # 0x0032 -> DIGIT TWO
    u'3'        # 0x0033 -> DIGIT THREE
    u'4'        # 0x0034 -> DIGIT FOUR
    u'5'        # 0x0035 -> DIGIT FIVE
    u'6'        # 0x0036 -> DIGIT SIX
    u'7'        # 0x0037 -> DIGIT SEVEN
    u'8'        # 0x0038 -> DIGIT EIGHT
    u'9'        # 0x0039 -> DIGIT NINE
    u':'        # 0x003a -> COLON
    u';'        # 0x003b -> SEMICOLON
    u'<'        # 0x003c -> LESS-THAN SIGN
    u'='        # 0x003d -> EQUALS SIGN
    u'>'        # 0x003e -> GREATER-THAN SIGN
    u'?'        # 0x003f -> QUESTION MARK
    u'@'        # 0x0040 -> COMMERCIAL AT
    u'A'        # 0x0041 -> LATIN CAPITAL LETTER A
    u'B'        # 0x0042 -> LATIN CAPITAL LETTER B
    u'C'        # 0x0043 -> LATIN CAPITAL LETTER C
    u'D'        # 0x0044 -> LATIN CAPITAL LETTER D
    u'E'        # 0x0045 -> LATIN CAPITAL LETTER E
    u'F'        # 0x0046 -> LATIN CAPITAL LETTER F
    u'G'        # 0x0047 -> LATIN CAPITAL LETTER G
    u'H'        # 0x0048 -> LATIN CAPITAL LETTER H
    u'I'        # 0x0049 -> LATIN CAPITAL LETTER I
    u'J'        # 0x004a -> LATIN CAPITAL LETTER J
    u'K'        # 0x004b -> LATIN CAPITAL LETTER K
    u'L'        # 0x004c -> LATIN CAPITAL LETTER L
    u'M'        # 0x004d -> LATIN CAPITAL LETTER M
    u'N'        # 0x004e -> LATIN CAPITAL LETTER N
    u'O'        # 0x004f -> LATIN CAPITAL LETTER O
    u'P'        # 0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x0052 -> LATIN CAPITAL LETTER R
    u'S'        # 0x0053 -> LATIN CAPITAL LETTER S
    u'T'        # 0x0054 -> LATIN CAPITAL LETTER T
    u'U'        # 0x0055 -> LATIN CAPITAL LETTER U
    u'V'        # 0x0056 -> LATIN CAPITAL LETTER V
    u'W'        # 0x0057 -> LATIN CAPITAL LETTER W
    u'X'        # 0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x005a -> LATIN CAPITAL LETTER Z
    u'['        # 0x005b -> LEFT SQUARE BRACKET
    u'\\'       # 0x005c -> REVERSE SOLIDUS
    u']'        # 0x005d -> RIGHT SQUARE BRACKET
    u'^'        # 0x005e -> CIRCUMFLEX ACCENT
    u'_'        # 0x005f -> LOW LINE
    u'`'        # 0x0060 -> GRAVE ACCENT
    u'a'        # 0x0061 -> LATIN SMALL LETTER A
    u'b'        # 0x0062 -> LATIN SMALL LETTER B
    u'c'        # 0x0063 -> LATIN SMALL LETTER C
    u'd'        # 0x0064 -> LATIN SMALL LETTER D
    u'e'        # 0x0065 -> LATIN SMALL LETTER E
    u'f'        # 0x0066 -> LATIN SMALL LETTER F
    u'g'        # 0x0067 -> LATIN SMALL LETTER G
    u'h'        # 0x0068 -> LATIN SMALL LETTER H
    u'i'        # 0x0069 -> LATIN SMALL LETTER I
    u'j'        # 0x006a -> LATIN SMALL LETTER J
    u'k'        # 0x006b -> LATIN SMALL LETTER K
    u'l'        # 0x006c -> LATIN SMALL LETTER L
    u'm'        # 0x006d -> LATIN SMALL LETTER M
    u'n'        # 0x006e -> LATIN SMALL LETTER N
    u'o'        # 0x006f -> LATIN SMALL LETTER O
    u'p'        # 0x0070 -> LATIN SMALL LETTER P
    u'q'        # 0x0071 -> LATIN SMALL LETTER Q
    u'r'        # 0x0072 -> LATIN SMALL LETTER R
    u's'        # 0x0073 -> LATIN SMALL LETTER S
    u't'        # 0x0074 -> LATIN SMALL LETTER T
    u'u'        # 0x0075 -> LATIN SMALL LETTER U
    u'v'        # 0x0076 -> LATIN SMALL LETTER V
    u'w'        # 0x0077 -> LATIN SMALL LETTER W
    u'x'        # 0x0078 -> LATIN SMALL LETTER X
    u'y'        # 0x0079 -> LATIN SMALL LETTER Y
    u'z'        # 0x007a -> LATIN SMALL LETTER Z
    u'{'        # 0x007b -> LEFT CURLY BRACKET
    u'|'        # 0x007c -> VERTICAL LINE
    u'}'        # 0x007d -> RIGHT CURLY BRACKET
    u'~'        # 0x007e -> TILDE
    u'\x7f'     # 0x007f -> DELETE
    u'\u0106'   # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
    u'\xfc'     # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe9'     # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    u'\u0101'   # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
    u'\xe4'     # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\u0123'   # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
    u'\xe5'     # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\u0107'   # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
    u'\u0142'   # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
    u'\u0113'   # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
    u'\u0156'   # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
    u'\u0157'   # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
    u'\u012b'   # 0x008c -> LATIN SMALL LETTER I WITH MACRON
    u'\u0179'   # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
    u'\xc4'     # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5'     # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc9'     # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xe6'     # 0x0091 -> LATIN SMALL LIGATURE AE
    u'\xc6'     # 0x0092 -> LATIN CAPITAL LIGATURE AE
    u'\u014d'   # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
    u'\xf6'     # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\u0122'   # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
    u'\xa2'     # 0x0096 -> CENT SIGN
    u'\u015a'   # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
    u'\u015b'   # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
    u'\xd6'     # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xdc'     # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xf8'     # 0x009b -> LATIN SMALL LETTER O WITH STROKE
    u'\xa3'     # 0x009c -> POUND SIGN
    u'\xd8'     # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    u'\xd7'     # 0x009e -> MULTIPLICATION SIGN
    u'\xa4'     # 0x009f -> CURRENCY SIGN
    u'\u0100'   # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
    u'\u012a'   # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
    u'\xf3'     # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    u'\u017b'   # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
    u'\u017c'   # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
    u'\u017a'   # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
    u'\u201d'   # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
    u'\xa6'     # 0x00a7 -> BROKEN BAR
    u'\xa9'     # 0x00a8 -> COPYRIGHT SIGN
    u'\xae'     # 0x00a9 -> REGISTERED SIGN
    u'\xac'     # 0x00aa -> NOT SIGN
    u'\xbd'     # 0x00ab -> VULGAR FRACTION ONE HALF
    u'\xbc'     # 0x00ac -> VULGAR FRACTION ONE QUARTER
    u'\u0141'   # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
    u'\xab'     # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2591'   # 0x00b0 -> LIGHT SHADE
    u'\u2592'   # 0x00b1 -> MEDIUM SHADE
    u'\u2593'   # 0x00b2 -> DARK SHADE
    u'\u2502'   # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\u0104'   # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
    u'\u010c'   # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
    u'\u0118'   # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
    u'\u0116'   # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
    u'\u2563'   # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\u012e'   # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
    u'\u0160'   # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
    u'\u2510'   # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\u0172'   # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
    u'\u016a'   # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
    u'\u255a'   # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\u017d'   # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
    u'\u0105'   # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
    u'\u010d'   # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
    u'\u0119'   # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
    u'\u0117'   # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
    u'\u012f'   # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
    u'\u0161'   # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
    u'\u0173'   # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
    u'\u016b'   # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
    u'\u017e'   # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
    u'\u2518'   # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   # 0x00db -> FULL BLOCK
    u'\u2584'   # 0x00dc -> LOWER HALF BLOCK
    u'\u258c'   # 0x00dd -> LEFT HALF BLOCK
    u'\u2590'   # 0x00de -> RIGHT HALF BLOCK
    u'\u2580'   # 0x00df -> UPPER HALF BLOCK
    u'\xd3'     # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xdf'     # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
    u'\u014c'   # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
    u'\u0143'   # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
    u'\xf5'     # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
    u'\xd5'     # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xb5'     # 0x00e6 -> MICRO SIGN
    u'\u0144'   # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
    u'\u0136'   # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
    u'\u0137'   # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
    u'\u013b'   # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
    u'\u013c'   # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
    u'\u0146'   # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
    u'\u0112'   # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
    u'\u0145'   # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
    u'\u2019'   # 0x00ef -> RIGHT SINGLE QUOTATION MARK
    u'\xad'     # 0x00f0 -> SOFT HYPHEN
    u'\xb1'     # 0x00f1 -> PLUS-MINUS SIGN
    u'\u201c'   # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
    u'\xbe'     # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
    u'\xb6'     # 0x00f4 -> PILCROW SIGN
    u'\xa7'     # 0x00f5 -> SECTION SIGN
    u'\xf7'     # 0x00f6 -> DIVISION SIGN
    u'\u201e'   # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
    u'\xb0'     # 0x00f8 -> DEGREE SIGN
    u'\u2219'   # 0x00f9 -> BULLET OPERATOR
    u'\xb7'     # 0x00fa -> MIDDLE DOT
    u'\xb9'     # 0x00fb -> SUPERSCRIPT ONE
    u'\xb3'     # 0x00fc -> SUPERSCRIPT THREE
    u'\xb2'     # 0x00fd -> SUPERSCRIPT TWO
    u'\u25a0'   # 0x00fe -> BLACK SQUARE
    u'\xa0'     # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
wecontinue/book-collection | lib/wunderlist.py | 2 | 6498 | #!/usr/bin/env python
from lib.base import BaseHandler
from lib.auth import NologinHandler
import tornado.locale
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import json
from datetime import datetime
from tornado.options import define, options
import pymongo
class GetWunBooksHandler(BaseHandler):
    """Paginated JSON listing of wunderlist books, sorted by a whitelisted field."""

    @tornado.web.authenticated
    def get(self):
        """Return one page of wunderlist books.

        Query arguments:
            page: 1-based page number (required).
            pmax: books per page, accepted range 1-19 (default 8).
            sort: one of vote_count / created_at / updated_at (required).

        Error responses are ``{"errmsg": ..., "errcode": 1}`` dicts.
        """
        page = self.get_argument("page", None)
        pmax = int(self.get_argument("pmax", 8))
        sort = self.get_argument("sort", None)

        if not page:
            self.write({"errmsg": "no_page", "errcode": 1})
            return
        page = int(page)

        if pmax not in range(1, 20):
            self.write({"errmsg": "invalid_pmax", "errcode": 1})
            return

        if not sort:
            self.write({"errmsg": "no_sort", "errcode": 1})
            return

        sort_fields = ["vote_count", "created_at", "updated_at"]
        if sort not in sort_fields:
            self.write({"errmsg": "no_sort_field", "errcode": 1})
            return

        # Connect to collection - wunbooks. Voter credentials are projected
        # out so they are never sent to the client.
        coll = self.db[self.gsettings.COLL_WUNDER]
        cursor = coll.find({}, {"_id": 0, "voter.password": 0,
                                "voter.password_hash": 0})
        cursor.sort([(sort, pymongo.DESCENDING),
                     ("updated_at", pymongo.DESCENDING)])

        # Ceiling division for the page count. The original compared with
        # "is 0" (identity test against an int, which only works by accident
        # of CPython small-int caching) and used the ambiguous "/" operator;
        # use "==" and explicit floor division instead.
        count = cursor.count()
        pages = count // pmax if count % pmax == 0 else (count // pmax) + 1

        try:
            bindex = pmax * (page - 1)
            books_r = list(cursor[bindex:bindex + pmax])
            self.write(json.dumps({
                "pages": pages,
                "page": page,
                "books": books_r,
            }))
        except NameError:
            self.write({"errmsg": "illegal_request", "errcode": 1})
            return
class WunSearchHandler(BaseHandler):
    """Check whether a book (by ISBN) is already collected or wished for."""

    @tornado.web.authenticated
    def get(self):
        """Write a JSON status payload for the requested ISBN."""
        isbn = self.get_argument("isbn", None)
        if not isbn:
            self.write({"errmsg": "no_isbn", "errcode": 1})
            return

        bookcase = self.db[self.gsettings.COLL_BOOKS]
        wunderlist = self.db[self.gsettings.COLL_WUNDER]

        if bookcase.find_one({"isbn": isbn}) is not None:
            # Already part of the collection proper.
            self.write({"errcode": 1, "errmsg": "book_got"})
            return

        if wunderlist.find_one({"isbn": isbn}) is not None:
            # Already on the wish list.
            self.write({"errcode": 1, "errmsg": "book_exist"})
            return

        # Unknown ISBN: free to be added.
        self.write({"errcode": 0})
class WunEditHandler(BaseHandler):
    """Add a new book to the wunderlist (wish list)."""

    @tornado.web.authenticated
    def post(self):
        """Insert a wished-for book; reject duplicates.

        Responds with ``{"errcode": 0}`` on success, or an error payload
        when the ISBN is missing or the book already exists in either the
        bookcase or the wunderlist.
        """
        if not self.get_current_user():
            self.redirect("/auth/nologin")
            return

        isbn = self.get_argument("isbn", None)
        if not isbn:
            self.write({"errmsg": "no_isbn", "errcode": 1})
            return

        # Check the book if existed in wunderlist | bookcase.
        # Return error message if the book exists.
        coll = self.db[self.gsettings.COLL_BOOKS]
        coll_w = self.db[self.gsettings.COLL_WUNDER]
        if coll.find_one({"isbn": isbn}) is not None:
            self.write({"errcode": 1, "errmsg": "book_got"})
            return
        if coll_w.find_one({"isbn": isbn}) is not None:
            self.write({"errcode": 1, "errmsg": "book_exist"})
            return

        # Insert the new book into the wunderlist. (The original re-checked
        # "if isbn:" here, but the guard above already guarantees truthiness,
        # so the duplicate check has been dropped.)
        book_fields = ["isbn", "title", "alt", "author",
                       "publisher", "image", "tags", "pub_date"]
        wunbook = {key: self.get_argument(key, None) for key in book_fields}
        wunbook["voter"] = [self.get_current_user()]
        wunbook["vote_count"] = 1
        # Compute the timestamp once so created_at and updated_at match exactly.
        now = datetime.now().__format__("%Y-%m-%d %H:%M:%S")
        wunbook["created_at"] = now
        wunbook["updated_at"] = now
        coll_w.insert(wunbook)

        # Save success
        self.write({"errcode": 0})
class VoteHandler(BaseHandler):
    """Register the current user's vote for a wunderlist book."""

    @tornado.web.authenticated
    def get(self):
        """Add one vote for the ISBN's book, once per user."""
        isbn = self.get_argument("isbn", None)
        if not isbn:
            self.write({"errmsg": "no_isbn", "errcode": 1})
            return

        # Wunderlist database
        coll = self.db[self.gsettings.COLL_WUNDER]
        vote_book = coll.find_one({"isbn": isbn})

        # Strip volatile fields so the membership test matches the shape the
        # voter entries were stored in. NOTE(review): this mutates the
        # ``current_user`` mapping in place, exactly as the original did.
        member = self.current_user
        del member["created"]
        del member["last_updated"]

        if member in vote_book["voter"]:
            self.write({"errcode": 1, "errmsg": "already_vote"})
            return

        vote_book["vote_count"] += 1
        vote_book["voter"].append(member)
        coll.save(vote_book)

        # Echo the updated document, without internal/volatile fields.
        self.write(coll.find_one(
            {"isbn": isbn},
            {"_id": 0, "voter.created": 0, "voter.last_updated": 0}))
| mit |
jtrain/django-guardian | guardian/models.py | 45 | 3521 | from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from guardian.compat import get_user_model
from guardian.compat import user_model_label
from guardian.compat import unicode
from guardian.utils import get_anonymous_user
from guardian.managers import GroupObjectPermissionManager
from guardian.managers import UserObjectPermissionManager
class BaseObjectPermission(models.Model):
    """
    Abstract ObjectPermission class. Actual class should additionally define
    a ``content_object`` field and either ``user`` or ``group`` field.
    """
    permission = models.ForeignKey(Permission)

    class Meta:
        abstract = True

    def __unicode__(self):
        subject = getattr(self, 'user', False) or self.group
        return u'%s | %s | %s' % (unicode(self.content_object),
                                  unicode(subject),
                                  unicode(self.permission.codename))

    def save(self, *args, **kwargs):
        # Refuse to persist a permission whose content type does not match
        # the content type of the object it is being attached to.
        ctype = ContentType.objects.get_for_model(self.content_object)
        if ctype != self.permission.content_type:
            raise ValidationError("Cannot persist permission not designed for "
                "this class (permission's type is %r and object's type is %r)"
                % (self.permission.content_type, ctype))
        return super(BaseObjectPermission, self).save(*args, **kwargs)
class BaseGenericObjectPermission(models.Model):
    """Abstract mixin adding a generic foreign key to the secured object."""
    content_type = models.ForeignKey(ContentType)
    object_pk = models.CharField(_('object ID'), max_length=255)
    content_object = GenericForeignKey(fk_field='object_pk')

    class Meta:
        abstract = True
class UserObjectPermissionBase(BaseObjectPermission):
    """
    Abstract per-user object permission.

    **Manager**: :manager:`UserObjectPermissionManager`
    """
    user = models.ForeignKey(user_model_label)

    objects = UserObjectPermissionManager()

    class Meta:
        abstract = True
        unique_together = ['user', 'permission', 'content_object']
class UserObjectPermission(UserObjectPermissionBase, BaseGenericObjectPermission):
    """Concrete per-user permission stored via a generic foreign key."""

    class Meta:
        unique_together = ['user', 'permission', 'object_pk']
class GroupObjectPermissionBase(BaseObjectPermission):
    """
    Abstract per-group object permission.

    **Manager**: :manager:`GroupObjectPermissionManager`
    """
    group = models.ForeignKey(Group)

    objects = GroupObjectPermissionManager()

    class Meta:
        abstract = True
        unique_together = ['group', 'permission', 'content_object']
class GroupObjectPermission(GroupObjectPermissionBase, BaseGenericObjectPermission):
    """Concrete per-group permission stored via a generic foreign key."""

    class Meta:
        unique_together = ['group', 'permission', 'object_pk']
User = get_user_model()
# Prototype User and Group methods
# ``get_anonymous`` returns guardian's anonymous-user stand-in, so callers
# can treat anonymous requests uniformly with authenticated ones.
setattr(User, 'get_anonymous', staticmethod(lambda: get_anonymous_user()))
# Convenience per-object permission helpers attached onto the (possibly
# custom) user model and onto Group; each simply delegates to the manager.
setattr(User, 'add_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.assign_perm(perm, self, obj))
setattr(User, 'del_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.remove_perm(perm, self, obj))
setattr(Group, 'add_obj_perm',
lambda self, perm, obj: GroupObjectPermission.objects.assign_perm(perm, self, obj))
setattr(Group, 'del_obj_perm',
lambda self, perm, obj: GroupObjectPermission.objects.remove_perm(perm, self, obj))
| bsd-2-clause |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/boto/cloudformation/template.py | 164 | 1686 | from boto.resultset import ResultSet
from boto.cloudformation.stack import Capability
class Template(object):
    """In-memory representation of a CloudFormation template description.

    Populated through SAX-style ``startElement``/``endElement`` callbacks
    while boto parses an API response.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.description = None
        self.template_parameters = None
        self.capabilities_reason = None
        self.capabilities = None

    def startElement(self, name, attrs, connection):
        # List-valued children get a ResultSet collector; everything else is
        # handled as scalar text in endElement.
        if name == "Parameters":
            self.template_parameters = ResultSet([('member', TemplateParameter)])
            return self.template_parameters
        if name == "Capabilities":
            self.capabilities = ResultSet([('member', Capability)])
            return self.capabilities
        return None

    def endElement(self, name, value, connection):
        if name == "Description":
            self.description = value
        elif name == "CapabilitiesReason":
            self.capabilities_reason = value
        else:
            # Unknown elements are kept verbatim as attributes.
            setattr(self, name, value)
class TemplateParameter(object):
    """One ``<member>`` entry from a template's Parameters list."""

    def __init__(self, parent):
        self.parent = parent
        self.default_value = None
        self.description = None
        self.no_echo = None
        self.parameter_key = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == "DefaultValue":
            self.default_value = value
        elif name == "Description":
            self.description = value
        elif name == "NoEcho":
            # ``value`` is the raw XML text ("true"/"false"). The original
            # used bool(value), which is True for ANY non-empty string --
            # including "false". Compare the text instead.
            self.no_echo = value.strip().lower() == 'true'
        elif name == "ParameterKey":
            self.parameter_key = value
        else:
            # Unknown elements are kept verbatim as attributes.
            setattr(self, name, value)
| mit |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/setuptools/tests/test_sandbox.py | 342 | 2170 | """develop tests
"""
import sys
import os
import shutil
import unittest
import tempfile
import types
import pkg_resources
import setuptools.sandbox
from setuptools.sandbox import DirectorySandbox, SandboxViolation
def has_win32com():
    """
    Return True if the local machine is Windows and win32com is importable.

    Used below to decide whether the win32com-specific tests are defined.
    """
    if not sys.platform.startswith('win32'):
        return False
    try:
        # Only importability matters; the original bound the module to an
        # unused local variable.
        __import__('win32com')
    except ImportError:
        return False
    return True
class TestSandbox(unittest.TestCase):
"""Tests for setuptools' DirectorySandbox write restrictions."""
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dir)
def test_devnull(self):
# Writing to os.devnull must be permitted even though it lies outside
# the sandboxed directory.
if sys.version < '2.4':
return
sandbox = DirectorySandbox(self.dir)
sandbox.run(self._file_writer(os.devnull))
def _file_writer(path):
# Factory returning a zero-argument callable that writes to ``path``;
# wrapped as a staticmethod below (pre-decorator style).
def do_write():
f = open(path, 'w')
f.write('xxx')
f.close()
return do_write
_file_writer = staticmethod(_file_writer)
# The win32com test only exists on Windows machines with win32com
# installed; the method is defined conditionally in the class body.
if has_win32com():
def test_win32com(self):
"""
win32com should not be prevented from caching COM interfaces
in gen_py.
"""
import win32com
gen_py = win32com.__gen_path__
target = os.path.join(gen_py, 'test_write')
sandbox = DirectorySandbox(self.dir)
try:
try:
sandbox.run(self._file_writer(target))
except SandboxViolation:
self.fail("Could not create gen_py file due to SandboxViolation")
finally:
# Clean up the cache file regardless of the outcome.
if os.path.exists(target): os.remove(target)
def test_setup_py_with_BOM(self):
"""
It should be possible to execute a setup.py with a Byte Order Mark
"""
target = pkg_resources.resource_filename(__name__,
'script-with-bom.py')
namespace = types.ModuleType('namespace')
setuptools.sandbox.execfile(target, vars(namespace))
assert namespace.result == 'passed'
# Allow running this test module directly: ``python test_sandbox.py``.
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
ggiscan/OnlineClerk | core/Interactor/squashcity/tests/test_db.py | 4 | 2377 | '''
Created on Nov 19, 2015
@author: george
'''
import unittest
from core.model import User
import core.dbman as dbman
from squashcity.model import SquashCityRequest
import sys
from datetime import datetime
class TestDBModel(unittest.TestCase):
    """End-to-end checks for SquashCity request persistence."""

    def setUp(self):
        # In-memory database: each test run starts from a clean slate.
        self.session = dbman.new_session('memory')
        self.product = 'SQUASHCITY'
        self.user = 'George'

    def test_squash(self):
        """Run the full scenario; later steps depend on earlier ones."""
        self.create_users_and_products()
        self.create_requests()
        self.active_product_requests()
        self.active_user_requests()

    def create_users_and_products(self):
        dbman.create_product(self.product, self.session)
        dbman.register_user(self.product, self.user, session=self.session)

    def create_requests(self):
        session = self.session
        try:
            user = session.query(User).filter(User.id == self.user).one()
            user_product = user.user_products[0]
            # Below request is active: no closing_date.
            session.add(SquashCityRequest(
                request_type=self.product,
                userproduct_id=user_product.id,
                start_date=datetime.now(),
                end_date=datetime(2015, 11, 5, 22, 36)))
            # Below request is inactive: closing_date is set.
            session.add(SquashCityRequest(
                request_type=self.product,
                userproduct_id=user_product.id,
                closing_date=datetime.now(),
                start_date=datetime.now(),
                end_date=None))
            session.commit()
        except Exception as exc:
            # The original used a bare "except:" with assertTrue(False, ...);
            # catch Exception explicitly and report the cause via fail().
            self.fail(exc)

    def active_product_requests(self):
        requests = dbman.active_product_requests(self.product,
                                                 session=self.session)
        self.assertEqual(1, len(requests))
        self.assertIsNone(requests[0].closing_date)

    def active_user_requests(self):
        requests = dbman.active_user_requests(self.user, self.product,
                                              session=self.session)
        self.assertEqual(1, len(requests))
        self.assertIsNone(requests[0].closing_date)
| gpl-2.0 |
yongshengwang/hue | desktop/core/ext-py/boto-2.38.0/boto/datapipeline/exceptions.py | 235 | 1471 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class PipelineDeletedException(JSONResponseError):
    """Raised for the Data Pipeline ``PipelineDeletedException`` error response."""
    pass


class InvalidRequestException(JSONResponseError):
    """Raised for the Data Pipeline ``InvalidRequestException`` error response."""
    pass


class TaskNotFoundException(JSONResponseError):
    """Raised for the Data Pipeline ``TaskNotFoundException`` error response."""
    pass


class PipelineNotFoundException(JSONResponseError):
    """Raised for the Data Pipeline ``PipelineNotFoundException`` error response."""
    pass


class InternalServiceError(JSONResponseError):
    """Raised for the Data Pipeline ``InternalServiceError`` error response."""
    pass
| apache-2.0 |
VISTAS-IVES/pyvistas | source/vistas/ui/controls/main_status_bar.py | 1 | 1162 | import wx
from vistas.core.task import Task
class MainStatusBar(wx.StatusBar):
""" Main task status bar for currently running Tasks. """
def __init__(self, parent, id):
super().__init__(parent, id)
# Two fields: a 70px slot (the gauge is drawn over it) and a stretch
# field showing the current task name or "Idle".
self.SetFieldsCount(2, [70, -1])
self.SetStatusText("Idle", 1)
self.gauge = None
# One-shot 100ms timer; OnNotify re-arms it each tick, giving a
# polling loop over Task.tasks.
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnNotify)
self.timer.Start(100, True)
def OnNotify(self, event):
# Most recently queued task, if any.
task = Task.tasks[-1] if len(Task.tasks) else None
if task is not None and task.status not in [Task.STOPPED, Task.COMPLETE]:
if task.status is Task.RUNNING:
self.gauge.SetValue(task.percent)
else:
# Indeterminate state (e.g. starting up): animate the gauge.
self.gauge.Pulse()
else:
# No active task: tear the gauge down and show the idle label.
self.SetStatusText("Idle", 1)
if self.gauge is not None:
self.gauge.Destroy()
self.gauge = None
# Re-arm the one-shot timer for the next poll.
self.timer.Start(100, True)
| bsd-3-clause |
Chairshot215/starship_kernel_moto_shamu | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
# Flags set from the command line in the option-parsing loop below.
quiet = 0
test = 0
comments = 0
# sysfs interface of the in-kernel rt-mutex tester; per-thread files are
# built as sysfsprefix + thread-id + statusfile/commandfile.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Symbolic command name -> numeric opcode string written to the command file.
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
# Test opcode name -> [status-field letter, comparator, fixed argument].
# A None argument means the value comes from the test line's data field.
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
"""Print command-line usage to stdout (Python 2 print statements)."""
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
"""Print *str* unless quiet mode is enabled.

NOTE: the parameter name shadows the builtin ``str``.
"""
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
"""Compare a status value against a test opcode triple.

``top`` is [status-field letter, comparator, fixed-arg-or-None] from
test_opcodes; ``arg`` is the data field of the test line. Returns 1 on
match, 0 otherwise.
"""
intval = int(val)
if top[0] == "M":
# Mutex state: pick the decimal digit addressed by arg.
# NOTE: "/" is Python 2 integer (floor) division here.
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
# Opcode field: allow symbolic command names in the test file.
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
# NOTE: Python 2 syntax ("except X, e", print statements) throughout.
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
# Each non-comment line has four ":"-separated fields:
#   command : opcode : thread-id : data
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
# "t" checks the status once; "w" polls until the condition holds.
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
groschovskiy/lerigos_music | Server/API/lib/gcloud/bigquery/query.py | 1 | 11587 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Queries."""
import six
from gcloud.bigquery._helpers import _TypedProperty
from gcloud.bigquery._helpers import _rows_from_json
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.job import QueryJob
from gcloud.bigquery.table import _parse_schema_resource
class _SyncQueryConfiguration(object):
"""User-settable configuration options for synchronous query jobs.
Values which are ``None`` -> server defaults.
"""
# Class-level defaults, one per user-settable option; presumably written
# through the _TypedProperty descriptors on QueryResults (confirm).
_default_dataset = None
_max_results = None
_timeout_ms = None
_preserve_nulls = None
_use_query_cache = None
class QueryResults(object):
"""Synchronous job: query tables.
:type query: string
:param query: SQL query string
:type client: :class:`gcloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration
for the dataset (which requires a project).
"""
def __init__(self, query, client):
self._client = client
# Raw resource mapping from the API; populated by _set_properties().
self._properties = {}
self.query = query
self._configuration = _SyncQueryConfiguration()
@property
def project(self):
"""Project bound to the job.
:rtype: string
:returns: the project (derived from the client).
"""
return self._client.project
def _require_client(self, client):
    """Return *client* if given, else the client bound at construction.

    :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
    :param client: the client to use. If not passed, falls back to the
                   ``client`` stored on the current dataset.

    :rtype: :class:`gcloud.bigquery.client.Client`
    :returns: The client passed in or the currently bound client.
    """
    return self._client if client is None else client
@property
def cache_hit(self):
"""Query results served from cache.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#cacheHit
:rtype: boolean or ``NoneType``
:returns: True if the query results were served from cache (None
until set by the server).
"""
return self._properties.get('cacheHit')
@property
def complete(self):
"""Server completed query.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobComplete
:rtype: boolean or ``NoneType``
:returns: True if the query completed on the server (None
until set by the server).
"""
return self._properties.get('jobComplete')
@property
def errors(self):
"""Errors generated by the query.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#errors
:rtype: list of mapping, or ``NoneType``
:returns: Mappings describing errors generated on the server (None
until set by the server).
"""
return self._properties.get('errors')
@property
def name(self):
"""Job name, generated by the back-end.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobReference
:rtype: string, or ``NoneType``
:returns: Job ID generated on the server (None until set by the
server).
"""
return self._properties.get('jobReference', {}).get('jobId')
@property
def job(self):
"""Job instance used to run the query.
:rtype: :class:`gcloud.bigquery.job.QueryJob`, or ``NoneType``
:returns: Job instance used to run the query (None until
``jobReference`` property is set by the server).
"""
job_ref = self._properties.get('jobReference')
if job_ref is not None:
return QueryJob(job_ref['jobId'], self.query, self._client)
@property
def page_token(self):
"""Token for fetching next batch of results.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#pageToken
:rtype: string, or ``NoneType``
:returns: Token generated on the server (None until set by the server).
"""
return self._properties.get('pageToken')
@property
def total_rows(self):
"""Total number of rows returned by the query
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalRows
:rtype: integer, or ``NoneType``
:returns: Count generated on the server (None until set by the server).
"""
return self._properties.get('totalRows')
@property
def total_bytes_processed(self):
"""Total number of bytes processed by the query
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalBytesProcessed
:rtype: integer, or ``NoneType``
:returns: Count generated on the server (None until set by the server).
"""
return self._properties.get('totalBytesProcessed')
@property
def rows(self):
"""Query results.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#rows
:rtype: list of tuples of row values, or ``NoneType``
:returns: fields describing the schema (None until set by the server).
"""
return _rows_from_json(self._properties.get('rows', ()), self.schema)
@property
def schema(self):
"""Schema for query results.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#schema
:rtype: list of :class:`SchemaField`, or ``NoneType``
:returns: fields describing the schema (None until set by the server).
"""
return _parse_schema_resource(self._properties.get('schema', {}))
# User-settable request options, validated by _TypedProperty and read by
# _build_resource() when composing the request payload.
default_dataset = _TypedProperty('default_dataset', Dataset)
"""See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#defaultDataset
"""
max_results = _TypedProperty('max_results', six.integer_types)
"""See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#maxResults
"""
preserve_nulls = _TypedProperty('preserve_nulls', bool)
"""See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#preserveNulls
"""
timeout_ms = _TypedProperty('timeout_ms', six.integer_types)
"""See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#timeoutMs
"""
use_query_cache = _TypedProperty('use_query_cache', bool)
"""See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#useQueryCache
"""
def _set_properties(self, api_response):
    """Update properties from resource in body of ``api_response``.

    :type api_response: httplib2.Response
    :param api_response: response returned from an API call
    """
    # Replace the cached properties wholesale (clear first) so stale keys
    # from a previous response do not linger after an update.
    self._properties.clear()
    self._properties.update(api_response)
def _build_resource(self):
"""Generate a resource for :meth:`begin`."""
resource = {'query': self.query}
if self.default_dataset is not None:
resource['defaultDataset'] = {
'projectId': self.project,
'datasetId': self.default_dataset.name,
}
if self.max_results is not None:
resource['maxResults'] = self.max_results
if self.preserve_nulls is not None:
resource['preserveNulls'] = self.preserve_nulls
if self.timeout_ms is not None:
resource['timeoutMs'] = self.timeout_ms
if self.use_query_cache is not None:
resource['useQueryCache'] = self.use_query_cache
return resource
def run(self, client=None):
    """API call: run the query via a POST request.

    See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query

    :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current dataset.
    """
    client = self._require_client(client)
    path = '/projects/%s/queries' % (self.project,)
    api_response = client.connection.api_request(
        method='POST', path=path, data=self._build_resource())
    # Cache the server's response so properties (rows, schema, ...)
    # reflect this run.
    self._set_properties(api_response)
def fetch_data(self, max_results=None, page_token=None, start_index=None,
               timeout_ms=None, client=None):
    """API call: fetch a page of query result data via a GET request.

    See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults

    :type max_results: integer or ``NoneType``
    :param max_results: maximum number of rows to return.

    :type page_token: string or ``NoneType``
    :param page_token: token representing a cursor into the table's rows.

    :type start_index: integer or ``NoneType``
    :param start_index: zero-based index of starting row

    :type timeout_ms: integer or ``NoneType``
    :param timeout_ms: timeout, in milliseconds, to wait for query to
                       complete

    :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current dataset.

    :rtype: tuple
    :returns: ``(row_data, total_rows, page_token)``, where ``row_data``
              is a list of tuples, one per result row, containing only
              the values; ``total_rows`` is a count of the total number
              of rows in the table; and ``page_token`` is an opaque
              string which can be used to fetch the next batch of rows
              (``None`` if no further batches can be fetched).
    :raises: ValueError if the query has not yet been executed.
    """
    # The job name is assigned by the server as a side effect of run();
    # without it there is nothing to poll.
    if self.name is None:
        raise ValueError("Query not yet executed: call 'run()'")

    client = self._require_client(client)

    # Forward only the paging/timeout options the caller supplied.
    params = {}

    if max_results is not None:
        params['maxResults'] = max_results

    if page_token is not None:
        params['pageToken'] = page_token

    if start_index is not None:
        params['startIndex'] = start_index

    if timeout_ms is not None:
        params['timeoutMs'] = timeout_ms

    path = '/projects/%s/queries/%s' % (self.project, self.name)
    response = client.connection.api_request(method='GET',
                                             path=path,
                                             query_params=params)
    # Update the cached response *before* converting rows: the schema
    # used by _rows_from_json comes from the freshly cached properties.
    self._set_properties(response)

    total_rows = response.get('totalRows')
    page_token = response.get('pageToken')
    rows_data = _rows_from_json(response.get('rows', ()), self.schema)

    return rows_data, total_rows, page_token
| apache-2.0 |
frewsxcv/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treebuilders/etree.py | 721 | 12609 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
    """Doctype node, stored as an element with the sentinel tag
    ``<!DOCTYPE>``.

    The doctype name is kept in the element's text; the public and
    system ids live as attributes on the underlying etree element so
    the serializers can read them back with ``element.get(...)``.
    """

    def __init__(self, name, publicId, systemId):
        Element.__init__(self, "<!DOCTYPE>")

        self._element.text = name
        self.publicId = publicId
        self.systemId = systemId

    def _getPublicId(self):
        # Defaults to "" so serializers never see None.
        return self._element.get("publicId", "")

    def _setPublicId(self, value):
        # NOTE(review): a None value is silently ignored, so an id can
        # never be cleared once set -- presumably intentional; confirm.
        if value is not None:
            self._element.set("publicId", value)

    publicId = property(_getPublicId, _setPublicId)

    def _getSystemId(self):
        return self._element.get("systemId", "")

    def _setSystemId(self, value):
        if value is not None:
            self._element.set("systemId", value)

    systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
    """Serialize an element and its child nodes to a string.

    Handles the sentinel tags used by this tree builder (``<!DOCTYPE>``
    and ``DOCUMENT_ROOT``) as well as comments and ordinary elements.
    Element and attribute names are passed through an ``InfosetFilter``
    so names coerced on the way into the tree are restored on the way
    out.
    """
    rv = []
    # Renamed from ``filter`` to avoid shadowing the builtin.
    infoset_filter = ihatexml.InfosetFilter()

    def serializeElement(element):
        if isinstance(element, ElementTree.ElementTree):
            element = element.getroot()

        if element.tag == "<!DOCTYPE>":
            if element.get("publicId") or element.get("systemId"):
                publicId = element.get("publicId") or ""
                systemId = element.get("systemId") or ""
                rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
                          (element.text, publicId, systemId))
            else:
                rv.append("<!DOCTYPE %s>" % (element.text,))
        elif element.tag == "DOCUMENT_ROOT":
            # Synthetic wrapper node: serialize only its text and
            # children, never the node itself.
            if element.text is not None:
                rv.append(element.text)
            if element.tail is not None:
                raise TypeError("Document node cannot have tail")
            if hasattr(element, "attrib") and len(element.attrib):
                raise TypeError("Document node cannot have attributes")

            for child in element:
                serializeElement(child)

        elif element.tag == ElementTreeCommentType:
            rv.append("<!--%s-->" % (element.text,))
        else:
            # This is assumed to be an ordinary element.
            # BUG FIX: previously only the attribute-less branch ran the
            # tag through fromXmlName; the attribute branch and the
            # closing tag emitted the raw (possibly coerced) etree tag.
            # All three now filter the tag consistently.
            if not element.attrib:
                rv.append("<%s>" % (infoset_filter.fromXmlName(element.tag),))
            else:
                attr = " ".join(["%s=\"%s\"" % (
                    infoset_filter.fromXmlName(name), value)
                    for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (infoset_filter.fromXmlName(element.tag),
                                       attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (infoset_filter.fromXmlName(element.tag),))

        if element.tail:
            rv.append(element.tail)

    serializeElement(element)

    return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mpl-2.0 |
robhowley/treetl | setup.py | 1 | 1425 |
# Build configuration: reads package metadata from pkg_info.json shipped
# inside the package directory and feeds it to setuptools.

import os
import json
from os import sep as os_sep
from setuptools import setup, find_packages

# canonical python package is structured as
#   package_dir_name/
#     -- setup.py
#     -- package_dir_name/
# get top package_dir_name and know it will contain the same
__pack_dir = os.path.split(os.path.dirname(os.path.abspath(__file__)))[1]

# Load name/version/description from the JSON metadata file.
with open('{0}{1}pkg_info.json'.format(__pack_dir, os_sep), 'r') as f:
    PKG_INFO = json.loads(f.read())

# 2.7 patch for importing package_data unicode_literal bug in setuptools
# fill in package data up here and it will be forced to str below
__pgk_data = {
    PKG_INFO['name']: [
        'pkg_info.json',
        'cfg/*.ini'
    ]
}

setup(
    name=PKG_INFO['name'],
    version=PKG_INFO['version'],
    description=PKG_INFO['description'],
    long_description=open('README.rst').read(),
    packages=find_packages(),
    include_package_data=True,
    # Force keys and glob entries to str (see unicode_literals note above).
    package_data={
        str(k): [ str(vi) for vi in v ]
        for k, v in __pgk_data.items()
    },
    scripts=[
    ],
    platforms='any',
    zip_safe=False,
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Operating System :: OS Independent'
    ],
    test_suite='tests',
)
| mit |
Triv90/Nova | nova/openstack/common/rpc/impl_zmq.py | 3 | 26578 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import processutils as utils
from nova.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('nova.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
    """Serialize ``data`` to a JSON string.

    We prefer using JSON, but it cannot encode all types.  Passing
    unencodable data is a developer error: the failure is logged and the
    ``TypeError`` is re-raised.
    """
    try:
        serialized = jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
    else:
        return serialized
def _deserialize(data):
    """Parse a JSON string back into Python objects.

    Counterpart of :func:`_serialize`.
    """
    LOG.debug(_("Deserializing: %s"), data)
    parsed = jsonutils.loads(data)
    return parsed
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
# Method starting with - are
# processed internally. (non-valid method name)
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
    """Unflatten the RPC envelope.

    Takes a flat sequence of alternating keys and values and returns a
    dictionary, i.e. [1, 2, 3, 4] => {1: 2, 3: 4}.  A trailing key with
    no matching value is dropped.

    BUG FIX: the original called the Python-2-only ``iterator.next()``
    method; the builtin ``next()`` (available since 2.6) behaves
    identically and also works under Python 3.
    """
    result = {}
    iterator = iter(packenv)
    try:
        for key in iterator:
            result[key] = next(iterator)
    except StopIteration:
        # Odd-length input: ignore the dangling key, matching the
        # original behavior.
        pass
    return result
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    """Send a one-way message to ``addr`` over a fresh PUSH socket.

    The whole connect-and-send sequence is bounded by ``rpc_cast_timeout``
    (or the caller-supplied ``timeout``).
    """
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            # 'conn' only exists if ZmqClient() succeeded; the vars()
            # check avoids a NameError when construction itself raised.
            if 'conn' in vars():
                conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'context': mcontext,
'topic': reply_topic,
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.

    :param method: ``_cast`` or ``_call``; casts are spawned in
        greenthreads, calls run inline.
    :param timeout: seconds to wait for a reply (calls only).
    :param envelope: whether to wrap the message in an rpc envelope.
    :raises rpc_common.Timeout: when the matchmaker yields no hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            # Fire-and-forget: hand the send off to a greenthread so a
            # slow peer cannot block the caller.
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        # NOTE(review): both returns exit on the first matchmaker result,
        # which appears to contradict the fanout comment above -- confirm
        # whether multi-queue fanout is expected to reach every host.
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
def create_connection(conf, new=True):
    """Create a driver Connection.

    ``new`` is part of the common rpc driver API and is ignored by this
    driver; a fresh Connection is always returned.
    """
    return Connection(conf)
def multicall(conf, *args, **kwargs):
    """Multiple calls.

    Returns the full list of responses collected by ``_call``; ``conf``
    is accepted for API compatibility (module-level CONF is used).
    """
    return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
    """Send a message, expect a response.

    ``_multi_send`` returns the list of responses; only the last one is
    handed back to the caller.
    """
    data = _multi_send(_call, *args, **kwargs)
    return data[-1]
def cast(conf, *args, **kwargs):
    """Send a message expecting no reply.

    ``conf`` is accepted for API compatibility and not used here.
    """
    _multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
    """Broadcast *msg* to every listener on *topic*; no reply is expected.

    The topic gets a ``fanout~`` prefix: '~' (rather than '.') survives
    topic splitting and signals the matchmaker and ZmqProxy to fan the
    message out to all subscribers.
    """
    fanout_topic = 'fanout~' + str(topic)
    _multi_send(_cast, context, fanout_topic, msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
    """Send a notification event via a cast.

    Notifications go to ``topic-priority`` rather than the
    ``topic.priority`` form used by the AMQP drivers, because dotted
    topics clash with this driver's topic-splitting assumptions.
    """
    dashed_topic = topic.replace('.', '-')
    cast(conf, context, dashed_topic, msg, envelope=envelope)
def cleanup():
    """Release the global ZeroMQ context and forget the matchmaker."""
    global ZMQ_CTX
    global matchmaker
    matchmaker = None
    if ZMQ_CTX:
        ZMQ_CTX.term()
        ZMQ_CTX = None
def _get_ctxt():
    """Return the process-wide zmq.Context, creating it on first use.

    :raises ImportError: if eventlet's green zmq binding is unavailable.
    """
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker, importing it lazily on first use.

    The concrete class is named by CONF.rpc_zmq_matchmaker; extra
    arguments are forwarded to its constructor on first instantiation.
    """
    global matchmaker
    if not matchmaker:
        matchmaker = importutils.import_object(
            CONF.rpc_zmq_matchmaker, *args, **kwargs)
    return matchmaker
| apache-2.0 |
wanghq/goots | doc/ots-python-sdk-2.0.7/ots2/test/sdk_param_unittest.py | 6 | 10511 | #!/bin/python
# -*- coding: utf8 -*-
import logging
import unittest
import exceptions
from ots2.client import *
from ots2.metadata import *
from ots2.error import *
from ots2.test.mock_connection import MockConnection
ENDPOINT = 'http://10.97.204.97:8800'
ACCESSID = 'accessid'
ACCESSKEY = 'accesskey'
INSTANCENAME = 'instancename'
class SDKParamTest(unittest.TestCase):
    """Parameter-validation tests for OTSClient.

    Every call below passes malformed arguments and must raise.  The
    original implementation used::

        try:
            <call>
            self.assertTrue(False)
        except SomeError:
            pass

    which is broken in the bare ``except:`` variants: when the call did
    NOT raise, the AssertionError produced by assertTrue(False) was
    swallowed by the bare except and the test silently passed.
    ``assertRaises`` has no such hole, so it is used throughout.
    """

    def setUp(self):
        # Route client logging to a file so test output stays clean.
        logger = logging.getLogger('test')
        handler = logging.FileHandler("test.log")
        formatter = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] "
                                      "[%(filename)s:%(lineno)s] %(message)s")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

        # Substitute the HTTP layer with a mock so no network is touched.
        OTSClient.connection_pool_class = MockConnection
        self.ots_client = OTSClient(ENDPOINT, ACCESSID, ACCESSKEY, INSTANCENAME, logger_name='test')

    def tearDown(self):
        pass

    def test_list_table(self):
        # list_table() accepts no extra positional arguments.
        with self.assertRaises(TypeError):
            self.ots_client.list_table('one')

    def test_create_table(self):
        with self.assertRaises(TypeError):
            self.ots_client.create_table('one', 'two', 'three')

        # Schema must be a list of (name, type) tuples, not a flat list.
        with self.assertRaises(OTSClientError):
            table_meta = TableMeta('test_table', ['PK1', 'STRING'])
            capacity_unit = CapacityUnit(10, 10)
            self.ots_client.create_table(table_meta, capacity_unit)

        # Both read and write capacity are mandatory on create.
        with self.assertRaises(OTSClientError):
            table_meta = TableMeta('test_table', [('PK1', 'STRING'), ('PK2', 'INTEGER')])
            capacity_unit = CapacityUnit(10, None)
            self.ots_client.create_table(table_meta, capacity_unit)

        # First argument must be a TableMeta, not a plain string.
        with self.assertRaises(OTSClientError):
            capacity_unit = CapacityUnit(10, 10)
            self.ots_client.create_table('test_table', capacity_unit)

        # Second argument must be a CapacityUnit, not a list.
        with self.assertRaises(OTSClientError):
            table_meta = TableMeta('test_table', [('PK1', 'STRING'), ('PK2', 'INTEGER')])
            self.ots_client.create_table(table_meta, [1, 2])

    def test_delete_table(self):
        with self.assertRaises(Exception):
            self.ots_client.delete_table('one', 'two')

        with self.assertRaises(OTSClientError):
            capacity_unit = CapacityUnit(10, 10)
            self.ots_client.delete_table(capacity_unit)

    def test_update_table(self):
        with self.assertRaises(Exception):
            self.ots_client.update_table('one', 'two', 'three')

        # Capacity must be a CapacityUnit instance, not a tuple.
        with self.assertRaises(OTSClientError):
            self.ots_client.update_table('test_table', (10, 10))

        # At least one of read/write capacity must be given on update.
        with self.assertRaises(OTSClientError):
            capacity_unit = CapacityUnit(None, None)
            self.ots_client.update_table('test_table', capacity_unit)

    def test_describe_table(self):
        with self.assertRaises(Exception):
            self.ots_client.describe_table('one', 'two')

        with self.assertRaises(OTSClientError):
            self.ots_client.describe_table(['test_table'])

    def test_put_row(self):
        with self.assertRaises(Exception):
            self.ots_client.put_row('one', 'two')

        # Unknown condition names are rejected.
        with self.assertRaises(OTSClientError):
            primary_key = {'PK1': 'hello', 'PK2': 100}
            attribute_columns = {'COL1': 'world', 'COL2': 1000}
            condition = Condition('InvalidCondition')
            self.ots_client.put_row('test_table', condition, primary_key, attribute_columns)

        # The condition must be a Condition instance, not a list.
        with self.assertRaises(Exception):
            primary_key = {'PK1': 'hello', 'PK2': 100}
            attribute_columns = {'COL1': 'world', 'COL2': 1000}
            self.ots_client.put_row('test_table', ['IGNORE'], primary_key, attribute_columns)

        # Keys and columns must be dicts, not strings.
        with self.assertRaises(Exception):
            condition = Condition('IGNORE')
            self.ots_client.put_row('test_table', condition, 'primary_key', 'attribute_columns')

    def test_get_row(self):
        with self.assertRaises(Exception):
            self.ots_client.get_row('one', 'two')

        with self.assertRaises(Exception):
            self.ots_client.get_row('test_table', 'primary_key', 'columns_to_get')

    def test_update_row(self):
        with self.assertRaises(Exception):
            self.ots_client.update_row('one', 'two', 'three')

        # The update argument must map operation names to column specs.
        with self.assertRaises(OTSClientError):
            condition = Condition('IGNORE')
            self.ots_client.update_row('test_table', condition,
                                       {'PK1': 'STRING', 'PK2': 'INTEGER'},
                                       'update_of_attribute_columns')

        # Unknown operation keys are rejected.
        with self.assertRaises(OTSClientError):
            condition = Condition('IGNORE')
            self.ots_client.update_row('test_table', condition,
                                       {'PK1': 'STRING', 'PK2': 'INTEGER'},
                                       {'ncv': 1})

        # 'put' expects a dict of columns, not a list.
        with self.assertRaises(OTSClientError):
            condition = Condition('IGNORE')
            self.ots_client.update_row('test_table', condition,
                                       {'PK1': 'STRING', 'PK2': 'INTEGER'},
                                       {'put': []})

        # 'delete' expects a list of column names, not a dict.
        with self.assertRaises(OTSClientError):
            condition = Condition('IGNORE')
            self.ots_client.update_row('test_table', condition,
                                       {'PK1': 'STRING', 'PK2': 'INTEGER'},
                                       {'delete': {}})

    def test_delete_row(self):
        with self.assertRaises(Exception):
            self.ots_client.delete_row('one', 'two', 'three', 'four')

        with self.assertRaises(Exception):
            condition = Condition('IGNORE')
            self.ots_client.delete_row('test_table', condition, 'primary_key')

    def test_batch_get_row(self):
        with self.assertRaises(Exception):
            self.ots_client.batch_get_row('one', 'two')

        with self.assertRaises(OTSClientError):
            self.ots_client.batch_get_row('batches')

    def test_batch_write_row(self):
        with self.assertRaises(Exception):
            self.ots_client.batch_write_row('one', 'two')

        with self.assertRaises(OTSClientError):
            self.ots_client.batch_write_row('batches')

        # Each batch entry must be a dict with a 'table_name' key and at
        # least one well-formed operation key ('put', 'update', 'delete').
        malformed_batches = [
            [('test_table')],
            [{'table_name': None}],
            [{'table_name': 'abc', 'put': None}],
            [{'table_name': 'abc', 'put': ['xxx']}],
            [{'table_name': 'abc', 'Put': []}],
            [{'table_name': 'abc', 'Any': []}],
        ]
        for batch_list in malformed_batches:
            with self.assertRaises(OTSClientError):
                self.ots_client.batch_write_row(batch_list)

    def test_get_range(self):
        with self.assertRaises(Exception):
            self.ots_client.get_range('one', 'two')

        # Direction must be 'FORWARD' or 'BACKWARD'.
        with self.assertRaises(OTSClientError):
            start_primary_key = {'PK1': 'hello', 'PK2': 100}
            end_primary_key = {'PK1': INF_MAX, 'PK2': INF_MIN}
            columns_to_get = ['COL1', 'COL2']
            self.ots_client.get_range('table_name', 'InvalidDirection',
                                      start_primary_key, end_primary_key,
                                      columns_to_get, limit=100)

        # Primary keys must be dicts, not flat lists.
        with self.assertRaises(Exception):
            start_primary_key = ['PK1', 'hello', 'PK2', 100]
            end_primary_key = {'PK1': INF_MAX, 'PK2': INF_MIN}
            columns_to_get = ['COL1', 'COL2']
            self.ots_client.get_range('table_name', 'FORWARD',
                                      start_primary_key, end_primary_key,
                                      columns_to_get, limit=100)

        with self.assertRaises(Exception):
            self.ots_client.get_range('table_name', 'FORWARD',
                                      'primary_key', 'primary_key', 'columns_to_get', 100)

    def test_xget_range(self):
        with self.assertRaises(Exception):
            self.ots_client.xget_range('one', 'two')

        # Argument errors surface on the first iteration of the generator.
        with self.assertRaises(OTSClientError):
            iter = self.ots_client.xget_range('one', 'two', 'three', 'four',
                                              'five', 'six', 'seven')
            iter.next()
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| mit |
canhhs91/greenpointtrees | src/oscar/apps/search/app.py | 25 | 1040 | from django.conf.urls import url
from haystack.views import search_view_factory
from oscar.apps.search import facets
from oscar.core.application import Application
from oscar.core.loading import get_class
class SearchApplication(Application):
    """Oscar application that mounts the Haystack-backed search view."""

    name = 'search'

    # Both the view and the form are resolved dynamically so projects can
    # override them without forking this module.
    search_view = get_class('search.views', 'FacetedSearchView')
    search_form = get_class('search.forms', 'SearchForm')

    def get_urls(self):
        """Return the url patterns for the search landing page."""
        # The form class has to be passed to the __init__ method as that is how
        # Haystack works. It's slightly different to normal CBVs.
        urlpatterns = [
            url(r'^$', search_view_factory(
                view_class=self.search_view,
                form_class=self.search_form,
                searchqueryset=self.get_sqs()),
                name='search'),
        ]
        return self.post_process_urls(urlpatterns)

    def get_sqs(self):
        """
        Return the SearchQuerySet required by the Haystack search view.
        """
        return facets.base_sqs()


# Singleton instance imported by the project's URL configuration.
application = SearchApplication()
| mit |
YoeriDijkstra/iFlow | packages/numerical2DV/salinity_prognostic/sclosureFunction.py | 1 | 1641 | """
sclosureFunction
Date: 11-Jan-16
Authors: Y.M. Dijkstra
"""
import numpy as np
import nifty as ny
def sclosureFunction(QAK, F, Fopen, Fclosed, data):
    """Solve the depth-averaged salinity closure equation.

    :param QAK: tuple ``(Q, AK)`` with the discharge Q and the effective
        horizontal dispersion coefficient AK on the x-grid.  Accepting a
        single tuple keeps the original call signature while dropping the
        Python-2-only tuple-parameter unpacking from the ``def`` line.
    :param F: forcing terms, shape (jmax+1, nRHS) -- TODO confirm
    :param Fopen: seaward (open) boundary values per forcing component
    :param Fclosed: landward (closed) boundary values per forcing component
    :param data: DataContainer providing the numerical grid
    :returns: tuple ``(S, Sx)`` with the salinity and its x-derivative,
        each of shape (jmax+1, nRHS)
    """
    Q, AK = QAK
    jmax = data.v('grid', 'maxIndex', 'x')
    nRHS = F.shape[-1]

    A = np.zeros((jmax+1, jmax+1))

    ##### LEFT-HAND SIDE #####
    x = ny.dimensionalAxis(data.slice('grid'), 'x')[:, 0, 0]
    dx = x[1:]-x[:-1]
    A[range(0, jmax), range(0, jmax)] = +AK[:-1]/dx             # main diagonal
    A[range(0, jmax), range(1, jmax+1)] = Q[:-1]-AK[1:]/dx      # upper diagonal

    # BC closed end
    A[-1, -1] = -AK[-1]
    A[-1, 0] = AK[0]

    ##### RIGHT-HAND SIDE #####
    sRHS = np.zeros((jmax+1, nRHS))
    sRHS[:-1, :] = F[:-1, :]
    sRHS[-1, :] = Q[0]*Fopen - Q[-1]*Fclosed + ny.integrate(F, 'x', 0, jmax, data.slice('grid'))

    ##### SOLVE #####
    Sx = np.zeros((jmax+1, 1, 1, nRHS))
    Sx[:, 0, 0, :] = np.linalg.solve(A, sRHS)

    ##### INTEGRATE ######
    # integrate from back to front to make sure that the landward BC is
    # guaranteed exactly.
    S = ny.integrate(Sx, 'x', jmax, range(0, jmax+1), data.slice('grid')) + Fclosed

    # NOTE(review): a rescaling correction for integration errors at the
    # seaward boundary existed here as commented-out code and is
    # intentionally disabled; see repository history if it is ever needed.
    return S[:, 0, 0, :], Sx[:, 0, 0, :]
| lgpl-3.0 |
40223119/w17test | static/Brython3.1.1-20150328-091302/Lib/importlib/abc.py | 743 | 14595 | """Abstract base classes related to import."""
from . import _bootstrap
from . import machinery
try:
import _frozen_importlib
except ImportError as exc:
if exc.name != '_frozen_importlib':
raise
_frozen_importlib = None
import abc
import imp
import marshal
import sys
import tokenize
import warnings
def _register(abstract_cls, *classes):
    """Register each class (and its _frozen_importlib twin, when that
    module is available) as a virtual subclass of *abstract_cls*."""
    for concrete_cls in classes:
        abstract_cls.register(concrete_cls)
        if _frozen_importlib is not None:
            frozen_twin = getattr(_frozen_importlib, concrete_cls.__name__)
            abstract_cls.register(frozen_twin)
class Finder(metaclass=abc.ABCMeta):

    """Legacy abstract base class for import finders.

    It may be subclassed for compatibility with legacy third party
    reimplementations of the import system.  Otherwise, finder
    implementations should derive from the more specific MetaPathFinder
    or PathEntryFinder ABCs.
    """

    @abc.abstractmethod
    def find_module(self, fullname, path=None):
        """An abstract method that should find a module.
        The fullname is a str and the optional path is a str or None.
        Returns a Loader object.
        """
        raise NotImplementedError
class MetaPathFinder(Finder):

    """Abstract base class for import finders on sys.meta_path."""

    @abc.abstractmethod
    def find_module(self, fullname, path):
        """Abstract method which, when implemented, should find a module.
        The fullname is a str and the path is a str or None.
        Returns a Loader object.
        """
        raise NotImplementedError

    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by importlib.invalidate_caches().
        """
        # NOTE(review): returns the NotImplemented singleton rather than
        # raising; callers ignore the return value, so this is harmless.
        return NotImplemented

# Make the concrete machinery finders virtual subclasses of this ABC.
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.PathFinder, machinery.WindowsRegistryFinder)
class PathEntryFinder(Finder):

    """Abstract base class for path entry finders used by PathFinder."""

    @abc.abstractmethod
    def find_loader(self, fullname):
        """Abstract method which, when implemented, returns a module loader.
        The fullname is a str.  Returns a 2-tuple of (Loader, portion) where
        portion is a sequence of file system locations contributing to part of
        a namespace package.  The sequence may be empty and the loader may be
        None.
        """
        raise NotImplementedError

    # Provide the legacy find_module() API by adapting find_loader()
    # through the shared bootstrap shim.
    find_module = _bootstrap._find_module_shim

    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by PathFinder.invalidate_caches().
        """
        return NotImplemented

_register(PathEntryFinder, machinery.FileFinder)
class Loader(metaclass=abc.ABCMeta):

    """Abstract base class for import loaders."""

    @abc.abstractmethod
    def load_module(self, fullname):
        """Abstract method which when implemented should load a module.
        The fullname is a str.  Returns the loaded module.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def module_repr(self, module):
        """Abstract method which when implemented calculates and returns the
        given module's repr."""
        raise NotImplementedError
class ResourceLoader(Loader):

    """Abstract base class for loaders which can return data from their
    back-end storage.

    This ABC represents one of the optional protocols specified by PEP 302.
    """

    @abc.abstractmethod
    def get_data(self, path):
        """Abstract method which when implemented should return the bytes for
        the specified path.  The path must be a str."""
        raise NotImplementedError
class InspectLoader(Loader):

    """Abstract base class for loaders which support inspection about the
    modules they can load.

    This ABC represents one of the optional protocols specified by PEP 302.
    """

    @abc.abstractmethod
    def is_package(self, fullname):
        """Abstract method which when implemented should return whether the
        module is a package.  The fullname is a str.  Returns a bool."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_code(self, fullname):
        """Abstract method which when implemented should return the code object
        for the module.  The fullname is a str.  Returns a types.CodeType."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_source(self, fullname):
        """Abstract method which should return the source code for the
        module.  The fullname is a str.  Returns a str."""
        raise NotImplementedError

_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.ExtensionFileLoader)
class ExecutionLoader(InspectLoader):

    """Abstract base class for loaders that wish to support the execution of
    modules as scripts.

    This ABC represents one of the optional protocols specified in PEP 302.
    """

    @abc.abstractmethod
    def get_filename(self, fullname):
        """Abstract method which should return the value that __file__ is to be
        set to."""
        raise NotImplementedError
class FileLoader(_bootstrap.FileLoader, ResourceLoader, ExecutionLoader):

    """Abstract base class partially implementing the ResourceLoader and
    ExecutionLoader ABCs."""
    # All concrete behaviour is inherited from _bootstrap.FileLoader; this
    # class only ties that implementation into the ABC hierarchy.

_register(FileLoader, machinery.SourceFileLoader,
          machinery.SourcelessFileLoader)
class SourceLoader(_bootstrap.SourceLoader, ResourceLoader, ExecutionLoader):

    """Abstract base class for loading source code (and optionally any
    corresponding bytecode).

    To support loading from source code, the abstractmethods inherited from
    ResourceLoader and ExecutionLoader need to be implemented. To also support
    loading from bytecode, the optional methods specified directly by this ABC
    is required.

    Inherited abstractmethods not implemented in this ABC:

        * ResourceLoader.get_data
        * ExecutionLoader.get_filename

    """

    def path_mtime(self, path):
        """Return the (int) modification time for the path (str)."""
        # path_mtime and path_stats are defined in terms of each other, so
        # the identity check below detects when neither was overridden and
        # breaks the would-be infinite recursion.
        if self.path_stats.__func__ is SourceLoader.path_stats:
            raise NotImplementedError
        return int(self.path_stats(path)['mtime'])

    def path_stats(self, path):
        """Return a metadata dict for the source pointed to by the path (str).
        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
          code modification;
        - 'size' (optional) is the size in bytes of the source code.
        """
        # Same mutual-recursion guard as in path_mtime above.
        if self.path_mtime.__func__ is SourceLoader.path_mtime:
            raise NotImplementedError
        return {'mtime': self.path_mtime(path)}

    def set_data(self, path, data):
        """Write the bytes to the path (if possible).

        Accepts a str path and data as bytes.

        Any needed intermediary directories are to be created. If for some
        reason the file cannot be written because of permissions, fail
        silently.
        """
        raise NotImplementedError

_register(SourceLoader, machinery.SourceFileLoader)
class PyLoader(SourceLoader):

    """Implement the deprecated PyLoader ABC in terms of SourceLoader.

    This class has been deprecated! It is slated for removal in Python 3.4.
    If compatibility with Python 3.1 is not needed then implement the
    SourceLoader ABC instead of this class. If Python 3.1 compatibility is
    needed, then use the following idiom to have a single class that is
    compatible with Python 3.1 onwards::

        try:
            from importlib.abc import SourceLoader
        except ImportError:
            from importlib.abc import PyLoader as SourceLoader


        class CustomLoader(SourceLoader):
            def get_filename(self, fullname):
                # Implement ...

            def source_path(self, fullname):
                '''Implement source_path in terms of get_filename.'''
                try:
                    return self.get_filename(fullname)
                except ImportError:
                    return None

            def is_package(self, fullname):
                filename = os.path.basename(self.get_filename(fullname))
                return os.path.splitext(filename)[0] == '__init__'

    """

    @abc.abstractmethod
    def is_package(self, fullname):
        # Inherited contract: return whether the module is a package.
        raise NotImplementedError

    @abc.abstractmethod
    def source_path(self, fullname):
        """Abstract method.  Accepts a str module name and returns the path to
        the source code for the module."""
        raise NotImplementedError

    def get_filename(self, fullname):
        """Implement get_filename in terms of source_path.

        As get_filename should only return a source file path there is no
        chance of the path not existing but loading still being possible, so
        ImportError should propagate instead of being turned into returning
        None.

        """
        # Emitted on every call so legacy subclasses see the deprecation.
        warnings.warn("importlib.abc.PyLoader is deprecated and is "
                      "slated for removal in Python 3.4; "
                      "use SourceLoader instead. "
                      "See the importlib documentation on how to be "
                      "compatible with Python 3.1 onwards.",
                      DeprecationWarning)
        path = self.source_path(fullname)
        if path is None:
            raise ImportError(name=fullname)
        else:
            return path
class PyPycLoader(PyLoader):

    """Abstract base class to assist in loading source and bytecode by
    requiring only back-end storage methods to be implemented.

    This class has been deprecated! Removal is slated for Python 3.4. Implement
    the SourceLoader ABC instead. If Python 3.1 compatibility is needed, see
    PyLoader.

    The methods get_code, get_source, and load_module are implemented for the
    user.

    """

    def get_filename(self, fullname):
        """Return the source or bytecode file path."""
        # Source wins over bytecode when both are available.
        path = self.source_path(fullname)
        if path is not None:
            return path
        path = self.bytecode_path(fullname)
        if path is not None:
            return path
        raise ImportError("no source or bytecode path available for "
                          "{0!r}".format(fullname), name=fullname)

    def get_code(self, fullname):
        """Get a code object from source or bytecode."""
        warnings.warn("importlib.abc.PyPycLoader is deprecated and slated for "
                      "removal in Python 3.4; use SourceLoader instead. "
                      "If Python 3.1 compatibility is required, see the "
                      "latest documentation for PyLoader.",
                      DeprecationWarning)
        source_timestamp = self.source_mtime(fullname)
        # Try to use bytecode if it is available.
        bytecode_path = self.bytecode_path(fullname)
        if bytecode_path:
            data = self.get_data(bytecode_path)
            try:
                # Classic .pyc layout: 4-byte magic, 4-byte source mtime,
                # 4-byte source size, then the marshalled code object.
                magic = data[:4]
                if len(magic) < 4:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                raw_timestamp = data[4:8]
                if len(raw_timestamp) < 4:
                    raise EOFError("bad timestamp in {}".format(fullname))
                pyc_timestamp = _bootstrap._r_long(raw_timestamp)
                raw_source_size = data[8:12]
                if len(raw_source_size) != 4:
                    raise EOFError("bad file size in {}".format(fullname))
                # Source size is unused as the ABC does not provide a way to
                # get the size of the source ahead of reading it.
                bytecode = data[12:]
                # Verify that the magic number is valid.
                if imp.get_magic() != magic:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                # Verify that the bytecode is not stale (only matters when
                # there is source to fall back on.
                if source_timestamp:
                    if pyc_timestamp < source_timestamp:
                        raise ImportError("bytecode is stale", name=fullname,
                                          path=bytecode_path)
            except (ImportError, EOFError):
                # If source is available give it a shot.
                if source_timestamp is not None:
                    pass
                else:
                    raise
            else:
                # Bytecode seems fine, so try to use it.
                return marshal.loads(bytecode)
        elif source_timestamp is None:
            raise ImportError("no source or bytecode available to create code "
                              "object for {0!r}".format(fullname),
                              name=fullname)
        # Use the source.
        source_path = self.source_path(fullname)
        if source_path is None:
            message = "a source path must exist to load {0}".format(fullname)
            raise ImportError(message, name=fullname)
        source = self.get_data(source_path)
        code_object = compile(source, source_path, 'exec', dont_inherit=True)
        # Generate bytecode and write it out.
        if not sys.dont_write_bytecode:
            data = bytearray(imp.get_magic())
            data.extend(_bootstrap._w_long(source_timestamp))
            # Mask to 32 bits to match the on-disk field width.
            data.extend(_bootstrap._w_long(len(source) & 0xFFFFFFFF))
            data.extend(marshal.dumps(code_object))
            self.write_bytecode(fullname, data)
        return code_object

    @abc.abstractmethod
    def source_mtime(self, fullname):
        """Abstract method.  Accepts a str filename and returns an int
        modification time for the source of the module."""
        raise NotImplementedError

    @abc.abstractmethod
    def bytecode_path(self, fullname):
        """Abstract method.  Accepts a str filename and returns the str pathname
        to the bytecode for the module."""
        raise NotImplementedError

    @abc.abstractmethod
    def write_bytecode(self, fullname, bytecode):
        """Abstract method.  Accepts a str filename and bytes object
        representing the bytecode for the module.  Returns a boolean
        representing whether the bytecode was written or not."""
        raise NotImplementedError
| gpl-3.0 |
Theer108/invenio | invenio/ext/assets/extensions.py | 7 | 9749 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Custom `Jinja2` extensions."""
import copy
import os
import warnings
from flask import _request_ctx_stack, current_app
from flask_assets import Environment, FlaskResolver
from invenio.utils.deprecation import RemovedInInvenio21Warning
from jinja2 import nodes
from jinja2.ext import Extension
import six
from webassets.bundle import is_url
from . import registry
class BundleExtension(Extension):
    """
    Jinja extension for css and js bundles.

    Definition of the required bundles.

    .. code-block:: jinja

        {%- bundles "jquery.js", "invenio.css" -%}
        {%- bundle "require.js" -%}

    Usage.

    .. code-block:: jinja

        {%- for bundle in get_bundle('js') %}
          <!-- {{ bundle.output }} -->
          {%- assets bundle %}
            <script type="text/javascript" src="{{ ASSET_URL }}"></script>
          {%- endassets %}
        {%- endfor %}
        </body>
        </html>
    """

    # Jinja tags handled by parse() below.
    tags = set(('bundle', 'bundles'))

    @classmethod
    def storage(cls):
        """Store used bundles on request context stack."""
        # Returns None when called outside a request context.
        ctx = _request_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, "_bundles"):
                setattr(ctx, "_bundles", set())
            return ctx._bundles

    @classmethod
    def install(cls, app):
        """Install the extension into the application."""
        Environment.resolver_class = InvenioResolver
        env = Environment(app)
        env.url = "{0}/{1}/".format(app.static_url_path,
                                    app.config["ASSETS_BUNDLES_DIR"])
        env.directory = os.path.join(app.static_folder,
                                     app.config["ASSETS_BUNDLES_DIR"])
        env.append_path(app.static_folder)
        env.auto_build = app.config.get("ASSETS_AUTO_BUILD", True)

        # The filters less and requirejs don't have the same behaviour by
        # default. Make sure we are respecting that.
        app.config.setdefault("LESS_RUN_IN_DEBUG", True)
        app.config.setdefault("REQUIREJS_RUN_IN_DEBUG", False)

        # Fixing some paths as we forced the output directory with the
        # .directory
        app.config.setdefault("REQUIREJS_BASEURL", app.static_folder)
        requirejs_config = os.path.join(env.directory,
                                        app.config["REQUIREJS_CONFIG"])
        if not os.path.exists(requirejs_config):
            # Fall back to a path relative to the bundles directory.
            app.config["REQUIREJS_CONFIG"] = os.path.relpath(
                os.path.join(app.static_folder,
                             app.config["REQUIREJS_CONFIG"]),
                env.directory)

        app.jinja_env.add_extension(BundleExtension)
        app.context_processor(BundleExtension.inject)

    @classmethod
    def inject(cls):
        """Inject the get_bundle function into the jinja templates."""
        _bundles = {}

        def get_bundle(suffix):
            # lazy build the bundles (on first call only; _bundles is a
            # closure shared across calls within this context processor)
            if not _bundles:
                for pkg, bundle in registry.bundles:
                    if bundle.output in _bundles:
                        raise ValueError("{0} was already defined!"
                                         .format(bundle.output))
                    _bundles[bundle.output] = bundle

            env = current_app.jinja_env.assets_environment
            # disable the compilation in debug mode iff asked.
            less_debug = env.debug and \
                not current_app.config.get("LESS_RUN_IN_DEBUG")
            if less_debug:
                warnings.warn("LESS_RUN_IN_DEBUG has been deprecated",
                              RemovedInInvenio21Warning)

            requirejs_debug = env.debug and \
                not current_app.config.get("REQUIREJS_RUN_IN_DEBUG")

            static_url_path = current_app.static_url_path + "/"
            bundles = []
            for bundle_name in cls.storage():
                if bundle_name.endswith(suffix):
                    bundle = _bundles[bundle_name]
                    if suffix == "css":
                        bundle.extra.update(rel="stylesheet")
                    # Sorted by weight below so load order is respected.
                    bundles.append((bundle.weight, bundle))

            from webassets.filter import option

            def option__deepcopy__(value, memo):
                """Custom deepcopy implementation for ``option`` class."""
                return option(copy.deepcopy(value[0]),
                              copy.deepcopy(value[1]),
                              copy.deepcopy(value[2]))
            # Patch option so the deepcopy of a bundle below succeeds.
            option.__deepcopy__ = option__deepcopy__

            for _, bundle in sorted(bundles):
                # A little bit of madness to read the "/" at the
                # beginning of the assets in ran in debug mode as well as
                # killing the filters if they are not wanted in debug mode.
                if env.debug:
                    # Create a deep copy to avoid filter removal from
                    # being cached
                    bundle_copy = copy.deepcopy(bundle)
                    bundle_copy.extra.update(static_url_path=static_url_path)
                    if bundle.has_filter("less"):
                        if less_debug:
                            bundle_copy.filters = None
                            bundle_copy.extra.update(rel="stylesheet/less")
                        else:
                            bundle_copy.extra.update(static_url_path="")
                    if bundle.has_filter("requirejs"):
                        if requirejs_debug:
                            bundle_copy.filters = None
                        else:
                            bundle_copy.extra.update(static_url_path="")
                    yield bundle_copy
                else:
                    yield bundle

        return dict(get_bundle=get_bundle)

    def __init__(self, environment):
        """Initialize the extension."""
        super(BundleExtension, self).__init__(environment)

    def _update(self, filename, bundles, caller):
        """Update the environment bundles.

        :return: empty html or html comment in debug mode.
        :rtype: str
        """
        self.storage().update(bundles)
        if current_app.debug:
            return "<!-- {0}: {1} -->\n".format(filename, ", ".join(bundles))
        else:
            return ''

    def parse(self, parser):
        """Parse the bundles block and feed the bundles environment.

        Bundles entries are replaced by an empty string.
        """
        lineno = next(parser.stream).lineno

        bundles = []
        while parser.stream.current.type != "block_end":
            value = parser.parse_expression()
            bundles.append(value)
            # Bundle names may be comma separated.
            parser.stream.skip_if("comma")

        call = self.call_method("_update", args=[nodes.Const(parser.name),
                                                 nodes.List(bundles)])
        call_block = nodes.CallBlock(call, [], [], '')
        call_block.set_lineno(lineno)
        return call_block
class InvenioResolver(FlaskResolver):
    """Custom resource resolver for webassets."""

    def resolve_source(self, ctx, item):
        """Return the absolute path of the resource."""
        # Pass through non-strings and full URLs untouched.
        if not isinstance(item, six.string_types) or is_url(item):
            return item
        if item.startswith(ctx.url):
            item = item[len(ctx.url):]
        return self.search_for_source(ctx, item)

    def resolve_source_to_url(self, ctx, filepath, item):
        """Return the url of the resource.

        Displaying them as is in debug mode as the web server knows where to
        search for them.

        :py:meth:`webassets.env.Resolver.resolve_source_to_url`
        """
        if ctx.debug:
            return item
        return super(InvenioResolver, self).resolve_source_to_url(ctx,
                                                                  filepath,
                                                                  item)

    def search_for_source(self, ctx, item):
        """Return absolute path of the resource.

        :py:meth:`webassets.env.Resolver.search_for_source`

        :param ctx: environment
        :param item: resource filename
        :return: absolute path
        """
        try:
            if ctx.load_path:
                abspath = super(InvenioResolver, self) \
                    .search_load_path(ctx, item)
            else:
                abspath = super(InvenioResolver, self) \
                    .search_env_directory(ctx, item)
        except Exception:  # FIXME do not catch all!
            # If a file is missing in production (non-debug mode), we want
            # to not break and will use /dev/null instead. The exception
            # is caught and logged.
            # NOTE(review): narrowing this to the specific lookup error
            # raised by webassets would avoid masking unrelated failures.
            if not current_app.debug:
                error = "Error loading asset file: {0}".format(item)
                current_app.logger.exception(error)
                abspath = "/dev/null"
            else:
                raise
        return abspath
| gpl-2.0 |
jumping/Diamond | src/collectors/bind/bind.py | 28 | 5681 | # coding=utf-8
"""
Collects stats from bind 9.5's statistics server
#### Dependencies
* [bind 9.5](http://www.isc.org/software/bind/new-features/9.5)
configured with libxml2 and statistics-channels
"""
import diamond.collector
import sys
import urllib2
if sys.version_info >= (2, 5):
import xml.etree.cElementTree as ElementTree
else:
import cElementTree as ElementTree
class BindCollector(diamond.collector.Collector):
    """Collect query, resolver, zone, socket and memory statistics from the
    XML statistics channel of bind 9.5+."""

    def get_default_config_help(self):
        """Return help text for this collector's configuration options."""
        config_help = super(BindCollector, self).get_default_config_help()
        config_help.update({
            'host': "",
            'port': "",
            'publish': "Available stats:\n" +
            " - resolver (Per-view resolver and cache statistics)\n" +
            " - server (Incoming requests and their answers)\n" +
            " - zonemgmt (Zone management requests/responses)\n" +
            " - sockets (Socket statistics)\n" +
            " - memory (Global memory usage)\n",
            'publish_view_bind': "",
            'publish_view_meta': "",
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(BindCollector, self).get_default_config()
        config.update({
            'host': 'localhost',
            'port': 8080,
            'path': 'bind',
            # Available stats:
            # - resolver (Per-view resolver and cache statistics)
            # - server (Incoming requests and their answers)
            # - zonemgmt (Requests/responses related to zone management)
            # - sockets (Socket statistics)
            # - memory (Global memory usage)
            'publish': [
                'resolver',
                'server',
                'zonemgmt',
                'sockets',
                'memory',
            ],
            # By default we don't publish these special views
            'publish_view_bind': False,
            'publish_view_meta': False,
        })
        return config

    def clean_counter(self, name, value):
        """Publish a monotonic counter as a per-interval rate.

        ``derivative`` yields a negative delta when bind restarts and its
        counters reset; clamp that to zero rather than publishing garbage.
        """
        value = self.derivative(name, value)
        if value < 0:
            value = 0
        self.publish(name, value)

    def collect(self):
        """Fetch the statistics XML and publish the configured stat groups."""
        try:
            req = urllib2.urlopen('http://%s:%d/' % (
                self.config['host'], int(self.config['port'])))
        # 'except Exception, e' is Python-2-only syntax; 'as' also works on
        # Python 2.6+ and keeps the file parseable by modern tooling.
        except Exception as e:
            self.log.error('Couldnt connect to bind: %s', e)
            return {}

        tree = ElementTree.parse(req)

        if not tree:
            raise ValueError("Corrupt XML file, no statistics found")

        root = tree.find('bind/statistics')

        if 'resolver' in self.config['publish']:
            for view in root.findall('views/view'):
                name = view.find('name').text
                # Skip the special internal views unless explicitly enabled.
                if name == '_bind' and not self.config['publish_view_bind']:
                    continue
                if name == '_meta' and not self.config['publish_view_meta']:
                    continue
                nzones = len(view.findall('zones/zone'))
                self.publish('view.%s.zones' % name, nzones)
                for counter in view.findall('rdtype'):
                    self.clean_counter(
                        'view.%s.query.%s' % (name,
                                              counter.find('name').text),
                        int(counter.find('counter').text)
                    )
                for counter in view.findall('resstat'):
                    self.clean_counter(
                        'view.%s.resstat.%s' % (name,
                                                counter.find('name').text),
                        int(counter.find('counter').text)
                    )
                for counter in view.findall('cache/rrset'):
                    # '!' marks NXDOMAIN/negative entries; not valid in a
                    # metric name, so rewrite it.
                    self.clean_counter(
                        'view.%s.cache.%s' % (
                            name, counter.find('name').text.replace('!',
                                                                    'NOT_')),
                        int(counter.find('counter').text)
                    )

        if 'server' in self.config['publish']:
            for counter in root.findall('server/requests/opcode'):
                self.clean_counter(
                    'requests.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
            for counter in root.findall('server/queries-in/rdtype'):
                self.clean_counter(
                    'queries.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )
            for counter in root.findall('server/nsstat'):
                self.clean_counter(
                    'nsstat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )

        if 'zonemgmt' in self.config['publish']:
            for counter in root.findall('server/zonestat'):
                self.clean_counter(
                    'zonestat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )

        if 'sockets' in self.config['publish']:
            for counter in root.findall('server/sockstat'):
                self.clean_counter(
                    'sockstat.%s' % counter.find('name').text,
                    int(counter.find('counter').text)
                )

        if 'memory' in self.config['publish']:
            # Iterate the element directly: getchildren() is deprecated and
            # plain iteration yields the same child elements.
            for counter in root.find('memory/summary'):
                self.publish(
                    'memory.%s' % counter.tag,
                    int(counter.text)
                )
| mit |
phlax/pootle | pootle/apps/pootle_format/abstracts.py | 5 | 1182 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.db import models
from pootle.i18n.gettext import ugettext_lazy as _
class AbstractFileExtension(models.Model):
    """Abstract base for file-extension models (e.g. "po", "xlf")."""

    class Meta(object):
        abstract = True

    def __str__(self):
        return self.name

    # Verbose name wrapped in ugettext_lazy for consistency with
    # AbstractFormat, so it is picked up for translation.
    name = models.CharField(
        _('Format filetype extension'),
        max_length=15,
        unique=True,
        db_index=True)
class AbstractFormat(models.Model):
    """Abstract base describing a localisation file format."""

    class Meta(object):
        abstract = True
        unique_together = ["title", "extension"]

    # Short internal identifier, unique across formats.
    name = models.CharField(
        verbose_name=_('Format name'),
        max_length=30, unique=True, db_index=True)
    # Human-readable title; only unique together with the extension.
    title = models.CharField(
        verbose_name=_('Format title'),
        max_length=255, db_index=False)
    enabled = models.BooleanField(
        default=True, verbose_name=_('Enabled'))
    monolingual = models.BooleanField(
        default=False, verbose_name=_('Monolingual format'))
| gpl-3.0 |
dudepare/django | tests/custom_managers/tests.py | 178 | 23098 | from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from django.utils import six
from .models import (
Book, Car, CustomManager, CustomQuerySet, DeconstructibleCustomManager,
FunPerson, OneToOneRestrictedModel, Person, PersonFromAbstract,
PersonManager, PublishedBookManager, RelatedModel, RestrictedModel,
)
class CustomManagerTests(TestCase):
    """Exercise custom Manager/QuerySet integration: method copying via
    ``from_queryset``, related managers (FK, generic FK, M2M), removal/clear
    semantics through default and named managers, and ``deconstruct()``."""

    # Managers built from CustomQuerySet; both must expose the copied
    # queryset methods tested below.
    custom_manager_names = [
        'custom_queryset_default_manager',
        'custom_queryset_custom_manager',
    ]

    @classmethod
    def setUpTestData(cls):
        cls.b1 = Book.published_objects.create(
            title="How to program", author="Rodney Dangerfield", is_published=True)
        cls.b2 = Book.published_objects.create(
            title="How to be smart", author="Albert Einstein", is_published=False)
        cls.p1 = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
        cls.droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)

    def test_custom_manager_basic(self):
        """
        Test a custom Manager method.
        """
        self.assertQuerysetEqual(
            Person.objects.get_fun_people(), [
                "Bugs Bunny"
            ],
            six.text_type
        )

    def test_queryset_copied_to_default(self):
        """
        The methods of a custom QuerySet are properly copied onto the
        default Manager.
        """
        for manager_name in self.custom_manager_names:
            manager = getattr(Person, manager_name)
            # Public methods are copied
            manager.public_method()
            # Private methods are not copied
            with self.assertRaises(AttributeError):
                manager._private_method()

    def test_manager_honors_queryset_only(self):
        for manager_name in self.custom_manager_names:
            manager = getattr(Person, manager_name)
            # Methods with queryset_only=False are copied even if they are private.
            manager._optin_private_method()
            # Methods with queryset_only=True aren't copied even if they are public.
            with self.assertRaises(AttributeError):
                manager.optout_public_method()

    def test_manager_use_queryset_methods(self):
        """
        Custom manager will use the queryset methods
        """
        for manager_name in self.custom_manager_names:
            manager = getattr(Person, manager_name)
            queryset = manager.filter()
            self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
            self.assertEqual(queryset._filter_CustomQuerySet, True)
            # Test that specialized querysets inherit from our custom queryset.
            queryset = manager.values_list('first_name', flat=True).filter()
            self.assertEqual(list(queryset), [six.text_type("Bugs")])
            self.assertEqual(queryset._filter_CustomQuerySet, True)

    def test_init_args(self):
        """
        The custom manager __init__() argument has been set.
        """
        self.assertEqual(Person.custom_queryset_custom_manager.init_arg, 'hello')

    def test_manager_attributes(self):
        """
        Custom manager method is only available on the manager and not on
        querysets.
        """
        Person.custom_queryset_custom_manager.manager_only()
        with self.assertRaises(AttributeError):
            Person.custom_queryset_custom_manager.all().manager_only()

    def test_queryset_and_manager(self):
        """
        Queryset method doesn't override the custom manager method.
        """
        queryset = Person.custom_queryset_custom_manager.filter()
        self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
        self.assertEqual(queryset._filter_CustomManager, True)

    def test_related_manager(self):
        """
        The related managers extend the default manager.
        """
        self.assertIsInstance(self.droopy.books, PublishedBookManager)
        self.assertIsInstance(self.b2.authors, PersonManager)

    def test_no_objects(self):
        """
        The default manager, "objects", doesn't exist, because a custom one
        was provided.
        """
        self.assertRaises(AttributeError, lambda: Book.objects)

    def test_filtering(self):
        """
        Custom managers respond to usual filtering methods
        """
        self.assertQuerysetEqual(
            Book.published_objects.all(), [
                "How to program",
            ],
            lambda b: b.title
        )

    def test_fk_related_manager(self):
        Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
        Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
        FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
        FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
        self.assertQuerysetEqual(
            self.b1.favorite_books.order_by('first_name').all(), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.fun_people_favorite_books.all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='fun_people').all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_gfk_related_manager(self):
        Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
        Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
        FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
        FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
        self.assertQuerysetEqual(
            self.b1.favorite_things.all(), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.fun_people_favorite_things.all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='fun_people').all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_m2m_related_manager(self):
        bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
        self.b1.authors.add(bugs)
        droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
        self.b1.authors.add(droopy)
        bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
        self.b1.fun_authors.add(bugs)
        droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
        self.b1.fun_authors.add(droopy)
        self.assertQuerysetEqual(
            self.b1.authors.order_by('first_name').all(), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.fun_authors.order_by('first_name').all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.authors(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.authors(manager='fun_people').all(), [
                "Bugs",
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_removal_through_default_fk_related_manager(self, bulk=True):
        bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
        droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
        # Droopy is filtered out by the fun_people related manager, so the
        # remove() is a no-op against the base table.
        self.b1.fun_people_favorite_books.remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.filter(favorite_book=self.b1), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.b1.fun_people_favorite_books.remove(bugs, bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.filter(favorite_book=self.b1), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        bugs.favorite_book = self.b1
        bugs.save()
        self.b1.fun_people_favorite_books.clear(bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.filter(favorite_book=self.b1), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_slow_removal_through_default_fk_related_manager(self):
        # Same scenario but with per-object (non-bulk) removal.
        self.test_removal_through_default_fk_related_manager(bulk=False)

    def test_removal_through_specified_fk_related_manager(self, bulk=True):
        Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
        droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
        # Check that the fun manager DOESN'T remove boring people.
        self.b1.favorite_books(manager='fun_people').remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        # Check that the boring manager DOES remove boring people.
        self.b1.favorite_books(manager='boring_people').remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='boring_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        droopy.favorite_book = self.b1
        droopy.save()
        # Check that the fun manager ONLY clears fun people.
        self.b1.favorite_books(manager='fun_people').clear(bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_books(manager='fun_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_slow_removal_through_specified_fk_related_manager(self):
        self.test_removal_through_specified_fk_related_manager(bulk=False)

    def test_removal_through_default_gfk_related_manager(self, bulk=True):
        bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
        droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
        self.b1.fun_people_favorite_things.remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.b1.fun_people_favorite_things.remove(bugs, bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        bugs.favorite_book = self.b1
        bugs.save()
        self.b1.fun_people_favorite_things.clear(bulk=bulk)
        self.assertQuerysetEqual(
            FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_slow_removal_through_default_gfk_related_manager(self):
        self.test_removal_through_default_gfk_related_manager(bulk=False)

    def test_removal_through_specified_gfk_related_manager(self, bulk=True):
        Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
        droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
        # Check that the fun manager DOESN'T remove boring people.
        self.b1.favorite_things(manager='fun_people').remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        # Check that the boring manager DOES remove boring people.
        self.b1.favorite_things(manager='boring_people').remove(droopy, bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='boring_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        droopy.favorite_thing = self.b1
        droopy.save()
        # Check that the fun manager ONLY clears fun people.
        self.b1.favorite_things(manager='fun_people').clear(bulk=bulk)
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.favorite_things(manager='fun_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_slow_removal_through_specified_gfk_related_manager(self):
        self.test_removal_through_specified_gfk_related_manager(bulk=False)

    def test_removal_through_default_m2m_related_manager(self):
        bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
        self.b1.fun_authors.add(bugs)
        droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
        self.b1.fun_authors.add(droopy)
        self.b1.fun_authors.remove(droopy)
        self.assertQuerysetEqual(
            self.b1.fun_authors.through._default_manager.all(), [
                "Bugs",
                "Droopy",
            ],
            lambda c: c.funperson.first_name,
            ordered=False,
        )
        self.b1.fun_authors.remove(bugs)
        self.assertQuerysetEqual(
            self.b1.fun_authors.through._default_manager.all(), [
                "Droopy",
            ],
            lambda c: c.funperson.first_name,
            ordered=False,
        )
        self.b1.fun_authors.add(bugs)
        self.b1.fun_authors.clear()
        self.assertQuerysetEqual(
            self.b1.fun_authors.through._default_manager.all(), [
                "Droopy",
            ],
            lambda c: c.funperson.first_name,
            ordered=False,
        )

    def test_removal_through_specified_m2m_related_manager(self):
        bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
        self.b1.authors.add(bugs)
        droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
        self.b1.authors.add(droopy)
        # Check that the fun manager DOESN'T remove boring people.
        self.b1.authors(manager='fun_people').remove(droopy)
        self.assertQuerysetEqual(
            self.b1.authors(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        # Check that the boring manager DOES remove boring people.
        self.b1.authors(manager='boring_people').remove(droopy)
        self.assertQuerysetEqual(
            self.b1.authors(manager='boring_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.b1.authors.add(droopy)
        # Check that the fun manager ONLY clears fun people.
        self.b1.authors(manager='fun_people').clear()
        self.assertQuerysetEqual(
            self.b1.authors(manager='boring_people').all(), [
                "Droopy",
            ],
            lambda c: c.first_name,
            ordered=False,
        )
        self.assertQuerysetEqual(
            self.b1.authors(manager='fun_people').all(), [
            ],
            lambda c: c.first_name,
            ordered=False,
        )

    def test_deconstruct_default(self):
        mgr = models.Manager()
        as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
        self.assertFalse(as_manager)
        self.assertEqual(mgr_path, 'django.db.models.manager.Manager')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {})

    def test_deconstruct_as_manager(self):
        mgr = CustomQuerySet.as_manager()
        as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
        self.assertTrue(as_manager)
        self.assertEqual(qs_path, 'custom_managers.models.CustomQuerySet')

    def test_deconstruct_from_queryset(self):
        mgr = DeconstructibleCustomManager('a', 'b')
        as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
        self.assertFalse(as_manager)
        self.assertEqual(mgr_path, 'custom_managers.models.DeconstructibleCustomManager')
        self.assertEqual(args, ('a', 'b',))
        self.assertEqual(kwargs, {})
        mgr = DeconstructibleCustomManager('x', 'y', c=3, d=4)
        as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
        self.assertFalse(as_manager)
        self.assertEqual(mgr_path, 'custom_managers.models.DeconstructibleCustomManager')
        self.assertEqual(args, ('x', 'y',))
        self.assertEqual(kwargs, {'c': 3, 'd': 4})

    def test_deconstruct_from_queryset_failing(self):
        mgr = CustomManager('arg')
        msg = ("Could not find manager BaseCustomManagerFromCustomQuerySet in "
               "django.db.models.manager.\n"
               "Please note that you need to inherit from managers you "
               "dynamically generated with 'from_queryset()'.")
        with self.assertRaisesMessage(ValueError, msg):
            mgr.deconstruct()

    def test_abstract_model_with_custom_manager_name(self):
        """
        A custom manager may be defined on an abstract model.
        It will be inherited by the abstract model's children.
        """
        PersonFromAbstract.abstract_persons.create(objects='Test')
        self.assertQuerysetEqual(
            PersonFromAbstract.abstract_persons.all(), ["Test"],
            lambda c: c.objects,
        )
class TestCars(TestCase):
    """Checks for the default and alternate managers on the Car model."""

    def test_managers(self):
        # "_default_manager" always points at the first manager declared on
        # the model class.
        for name, mileage, top_speed in [
            ("Corvette", 21, 180),
            ("Neon", 31, 100),
        ]:
            Car.cars.create(name=name, mileage=mileage, top_speed=top_speed)

        def by_name(c):
            return c.name

        self.assertQuerysetEqual(
            Car._default_manager.order_by("name"),
            ["Corvette", "Neon"],
            by_name,
        )
        self.assertQuerysetEqual(
            Car.cars.order_by("name"),
            ["Corvette", "Neon"],
            by_name,
        )
        # The alternate manager restricts the queryset to fast cars only.
        self.assertQuerysetEqual(
            Car.fast_cars.all(),
            ["Corvette"],
            by_name,
        )
class CustomManagersRegressTestCase(TestCase):
    """Regression tests: saving and cascading deletes must not be confused
    by default managers that filter out rows."""

    def test_filtered_default_manager(self):
        """Even though the default manager filters out some records,
        we must still be able to save (particularly, save by updating
        existing records) those filtered instances. This is a
        regression test for #8990, #9527"""
        related = RelatedModel.objects.create(name="xyzzy")
        obj = RestrictedModel.objects.create(name="hidden", related=related)
        obj.name = "still hidden"
        obj.save()
        # If the hidden object wasn't seen during the save process,
        # there would now be two objects in the database.
        self.assertEqual(RestrictedModel.plain_manager.count(), 1)

    def test_delete_related_on_filtered_manager(self):
        """Deleting related objects should also not be distracted by a
        restricted manager on the related object. This is a regression
        test for #2698."""
        related = RelatedModel.objects.create(name="xyzzy")
        for name, public in (('one', True), ('two', False), ('three', False)):
            RestrictedModel.objects.create(name=name, is_public=public, related=related)
        obj = RelatedModel.objects.get(name="xyzzy")
        obj.delete()
        # All of the RestrictedModel instances should have been
        # deleted, since they *all* pointed to the RelatedModel. If
        # the default manager is used, only the public one will be
        # deleted.
        self.assertEqual(len(RestrictedModel.plain_manager.all()), 0)

    def test_delete_one_to_one_manager(self):
        # The same test case as the last one, but for one-to-one
        # models, which are implemented slightly different internally,
        # so it's a different code path.
        obj = RelatedModel.objects.create(name="xyzzy")
        OneToOneRestrictedModel.objects.create(name="foo", is_public=False, related=obj)
        obj = RelatedModel.objects.get(name="xyzzy")
        obj.delete()
        self.assertEqual(len(OneToOneRestrictedModel.plain_manager.all()), 0)

    def test_queryset_with_custom_init(self):
        """
        BaseManager.get_queryset() should use kwargs rather than args to allow
        custom kwargs (#24911).
        """
        qs_custom = Person.custom_init_queryset_manager.all()
        qs_default = Person.objects.all()
        self.assertQuerysetEqual(qs_custom, qs_default)
| bsd-3-clause |
bwasti/caffe2 | caffe2/experiments/python/convnet_benchmarks.py | 2 | 19876 | ## @package convnet_benchmarks
# Module caffe2.experiments.python.convnet_benchmarks
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Benchmark for common convnets.
(NOTE: The numbers below were measured before the parameter-update step was
added; TODO: re-measure and update them.)
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, are as follows (time reported below is per-batch time,
forward / forward+backward):
CuDNN V3 CuDNN v4
AlexNet 32.5 / 108.0 27.4 / 90.1
OverFeat 113.0 / 342.3 91.7 / 276.5
Inception 134.5 / 485.8 125.7 / 450.6
VGG (batch 64) 200.8 / 650.0 164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
Batch Size Speed per batch Speed per image
16 22.8 / 72.7 1.43 / 4.54
32 38.0 / 127.5 1.19 / 3.98
64 67.2 / 233.6 1.05 / 3.65
128 125.7 / 450.6 0.98 / 3.52
Speed on Tesla M40, which 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
AlexNet 68.4 / 218.1
OverFeat 210.5 / 630.3
Inception 300.2 / 1122.2
VGG (batch 64) 405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
"""
import argparse
import time
from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer
def MLP(order):
    """Build a deep multi-branch fully-connected benchmark network.

    ``order`` is accepted for API symmetry with the convnet builders but is
    not used here. Returns ``(model, hidden_dim)``.
    """
    model = cnn.CNNModelHelper()
    hidden_dim = 256
    n_layers = 20
    n_branches = 3

    def blob(layer, branch):
        # Canonical blob name for a given layer/branch position.
        return "fc_{}_{}".format(layer, branch)

    for layer in range(n_layers):
        for branch in range(n_branches):
            bottom = blob(layer, branch) if layer > 0 else "data"
            model.FC(
                bottom, blob(layer + 1, branch),
                dim_in=hidden_dim, dim_out=hidden_dim,
                weight_init=model.XavierInit,
                bias_init=model.XavierInit)
    # Merge the parallel branches, then project to the 1000-way output.
    branch_tops = [blob(n_layers, branch) for branch in range(n_branches)]
    model.Sum(branch_tops, ["sum"])
    model.FC("sum", "last",
             dim_in=hidden_dim, dim_out=1000,
             weight_init=model.XavierInit,
             bias_init=model.XavierInit)
    xent = model.LabelCrossEntropy(["last", "label"], "xent")
    model.AveragedLoss(xent, "loss")
    return model, hidden_dim
def AlexNet(order):
    """Build AlexNet with a softmax/cross-entropy head for benchmarking.

    :param order: data layout passed to CNNModelHelper (presumably "NCHW"
        or "NHWC" — confirm against cnn.CNNModelHelper).
    :return: (model, 224) — the model and its expected input image size.
    """
    model = cnn.CNNModelHelper(order, name="alexnet",
                               use_cudnn=True, cudnn_exhaustive_search=True)
    conv1 = model.Conv(
        "data",
        "conv1",
        3,
        64,
        11,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        stride=4,
        pad=2
    )
    relu1 = model.Relu(conv1, "conv1")
    pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2)
    conv2 = model.Conv(
        pool1,
        "conv2",
        64,
        192,
        5,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=2
    )
    relu2 = model.Relu(conv2, "conv2")
    pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2)
    conv3 = model.Conv(
        pool2,
        "conv3",
        192,
        384,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu3 = model.Relu(conv3, "conv3")
    conv4 = model.Conv(
        relu3,
        "conv4",
        384,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu4 = model.Relu(conv4, "conv4")
    conv5 = model.Conv(
        relu4,
        "conv5",
        256,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu5 = model.Relu(conv5, "conv5")
    pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
    # Fully-connected head: pool5 is flattened to 256 channels * 6x6 spatial.
    fc6 = model.FC(
        pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
        ('ConstantFill', {})
    )
    relu6 = model.Relu(fc6, "fc6")
    fc7 = model.FC(
        relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
    )
    relu7 = model.Relu(fc7, "fc7")
    fc8 = model.FC(
        relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
    )
    pred = model.Softmax(fc8, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    model.AveragedLoss(xent, "loss")
    return model, 224
def OverFeat(order):
    """Build the OverFeat network with a softmax/cross-entropy head.

    :param order: data layout passed to CNNModelHelper.
    :return: (model, 231) — the model and its expected input image size.
    """
    model = cnn.CNNModelHelper(order, name="overfeat",
                               use_cudnn=True, cudnn_exhaustive_search=True)
    conv1 = model.Conv(
        "data",
        "conv1",
        3,
        96,
        11,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        stride=4
    )
    relu1 = model.Relu(conv1, "conv1")
    pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
    conv2 = model.Conv(
        pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {})
    )
    relu2 = model.Relu(conv2, "conv2")
    pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
    conv3 = model.Conv(
        pool2,
        "conv3",
        256,
        512,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu3 = model.Relu(conv3, "conv3")
    conv4 = model.Conv(
        relu3,
        "conv4",
        512,
        1024,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu4 = model.Relu(conv4, "conv4")
    conv5 = model.Conv(
        relu4,
        "conv5",
        1024,
        1024,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu5 = model.Relu(conv5, "conv5")
    pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2)
    # Fully-connected head: pool5 flattens to 1024 channels * 6x6 spatial.
    fc6 = model.FC(
        pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}),
        ('ConstantFill', {})
    )
    relu6 = model.Relu(fc6, "fc6")
    fc7 = model.FC(
        relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {})
    )
    relu7 = model.Relu(fc7, "fc7")
    fc8 = model.FC(
        relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
    )
    pred = model.Softmax(fc8, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    model.AveragedLoss(xent, "loss")
    return model, 231
def VGGA(order):
    """Build the VGG-A (11-layer) network with a softmax/cross-entropy head.

    :param order: data layout passed to CNNModelHelper.
    :return: (model, 231) — the model and its expected input image size.
        NOTE(review): the FC input of 512*7*7 also matches 231 after five
        floor-dividing 2x pools, so 231 works, but VGG is conventionally
        benchmarked at 224 — confirm the intent.
    """
    model = cnn.CNNModelHelper(order, name='vgg-a',
                               use_cudnn=True, cudnn_exhaustive_search=True)
    conv1 = model.Conv(
        "data",
        "conv1",
        3,
        64,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu1 = model.Relu(conv1, "conv1")
    pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
    conv2 = model.Conv(
        pool1,
        "conv2",
        64,
        128,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu2 = model.Relu(conv2, "conv2")
    pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
    conv3 = model.Conv(
        pool2,
        "conv3",
        128,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu3 = model.Relu(conv3, "conv3")
    conv4 = model.Conv(
        relu3,
        "conv4",
        256,
        256,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu4 = model.Relu(conv4, "conv4")
    pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2)
    conv5 = model.Conv(
        pool4,
        "conv5",
        256,
        512,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu5 = model.Relu(conv5, "conv5")
    conv6 = model.Conv(
        relu5,
        "conv6",
        512,
        512,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu6 = model.Relu(conv6, "conv6")
    pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2)
    conv7 = model.Conv(
        pool6,
        "conv7",
        512,
        512,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu7 = model.Relu(conv7, "conv7")
    conv8 = model.Conv(
        relu7,
        "conv8",
        512,
        512,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu8 = model.Relu(conv8, "conv8")
    pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2)
    fcix = model.FC(
        pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}),
        ('ConstantFill', {})
    )
    reluix = model.Relu(fcix, "fcix")
    fcx = model.FC(
        reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
    )
    relux = model.Relu(fcx, "fcx")
    fcxi = model.FC(
        relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
    )
    pred = model.Softmax(fcxi, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    model.AveragedLoss(xent, "loss")
    return model, 231
def net_DAG_Builder(model):
    """Print a banner, then build and return the sparse-transform DAG root."""
    banner = "===================================================="
    print(banner)
    print(" Start Building DAG ")
    print(banner)
    return SparseTransformer.netbuilder(model)
def _InceptionModule(
    model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
    conv5_depths, pool_depth
):
    """Append a GoogLeNet Inception module to ``model``.

    Builds four parallel paths from ``input_blob`` (1x1 conv; 1x1 reduce +
    3x3 conv; 1x1 reduce + 5x5 conv; 3x3 max-pool + 1x1 projection) and
    concatenates them into a blob named ``output_name``.

    :param conv3_depths: (reduce_depth, conv_depth) pair for the 3x3 path.
    :param conv5_depths: (reduce_depth, conv_depth) pair for the 5x5 path.
    :return: the concatenated output blob.
    """
    # path 1: 1x1 conv
    conv1 = model.Conv(
        input_blob, output_name + ":conv1", input_depth, conv1_depth, 1,
        ('XavierFill', {}), ('ConstantFill', {})
    )
    conv1 = model.Relu(conv1, conv1)
    # path 2: 1x1 conv + 3x3 conv
    conv3_reduce = model.Conv(
        input_blob, output_name +
        ":conv3_reduce", input_depth, conv3_depths[0],
        1, ('XavierFill', {}), ('ConstantFill', {})
    )
    conv3_reduce = model.Relu(conv3_reduce, conv3_reduce)
    conv3 = model.Conv(
        conv3_reduce,
        output_name + ":conv3",
        conv3_depths[0],
        conv3_depths[1],
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    conv3 = model.Relu(conv3, conv3)
    # path 3: 1x1 conv + 5x5 conv
    conv5_reduce = model.Conv(
        input_blob, output_name +
        ":conv5_reduce", input_depth, conv5_depths[0],
        1, ('XavierFill', {}), ('ConstantFill', {})
    )
    conv5_reduce = model.Relu(conv5_reduce, conv5_reduce)
    conv5 = model.Conv(
        conv5_reduce,
        output_name + ":conv5",
        conv5_depths[0],
        conv5_depths[1],
        5,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=2
    )
    conv5 = model.Relu(conv5, conv5)
    # path 4: pool + 1x1 conv
    pool = model.MaxPool(
        input_blob,
        output_name + ":pool",
        kernel=3,
        stride=1,
        pad=1
    )
    pool_proj = model.Conv(
        pool, output_name + ":pool_proj", input_depth, pool_depth, 1,
        ('XavierFill', {}), ('ConstantFill', {})
    )
    pool_proj = model.Relu(pool_proj, pool_proj)
    output = model.Concat([conv1, conv3, conv5, pool_proj], output_name)
    return output
def Inception(order):
    """Build the Inception (GoogLeNet-style) benchmark model.

    order: tensor layout, "NCHW" or "NHWC".
    Returns (model, 224): the CNNModelHelper and the expected square
    input image size.  Expects external blobs "data" and "label".
    """
    model = cnn.CNNModelHelper(order, name="inception",
                               use_cudnn=True, cudnn_exhaustive_search=True)
    # Stem: 7x7/2 conv -> pool -> 1x1 -> 3x3 -> pool.
    conv1 = model.Conv(
        "data",
        "conv1",
        3,
        64,
        7,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        stride=2,
        pad=3
    )
    relu1 = model.Relu(conv1, "conv1")
    pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1)
    conv2a = model.Conv(
        pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {})
    )
    conv2a = model.Relu(conv2a, conv2a)
    conv2 = model.Conv(
        conv2a,
        "conv2",
        64,
        192,
        3,
        ('XavierFill', {}),
        ('ConstantFill', {}),
        pad=1
    )
    relu2 = model.Relu(conv2, "conv2")
    pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1)
    # Inception modules
    inc3 = _InceptionModule(
        model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32
    )
    inc4 = _InceptionModule(
        model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64
    )
    pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1)
    inc5 = _InceptionModule(
        model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64
    )
    inc6 = _InceptionModule(
        model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64
    )
    inc7 = _InceptionModule(
        model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64
    )
    inc8 = _InceptionModule(
        model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64
    )
    inc9 = _InceptionModule(
        model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128
    )
    pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1)
    inc10 = _InceptionModule(
        model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128
    )
    inc11 = _InceptionModule(
        model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128
    )
    # Global 7x7 average pool, then the classifier.
    pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1)
    fc = model.FC(
        pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {})
    )
    # It seems that Soumith's benchmark does not have softmax on top
    # for Inception. We will add it anyway so we can have a proper
    # backward pass.
    pred = model.Softmax(fc, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    model.AveragedLoss(xent, "loss")
    return model, 224
def AddInput(model, batch_size, db, db_type):
    """Adds the data input part.

    Reads `batch_size` (image, label) pairs from the DB at `db` of type
    `db_type` and returns the prepared (data, label) blobs.
    """
    # Raw uint8 NHWC images plus integer labels, straight from the DB.
    data_uint8, label = model.TensorProtosDBInput(
        [], ["data_uint8", "label"], batch_size=batch_size,
        db=db, db_type=db_type
    )
    # uint8 -> float, NHWC -> NCHW, then scale pixel values into [0, 1).
    data = model.Cast(data_uint8, "data_nhwc", to=core.DataType.FLOAT)
    data = model.NHWC2NCHW(data, "data")
    data = model.Scale(data, data, scale=float(1. / 256))
    # The input is constant data; no gradient should flow back into it.
    data = model.StopGradient(data, data)
    return data, label
def AddParameterUpdate(model):
    """ Simple plain SGD update -- not tuned to actually train the models """
    ITER = model.Iter("iter")
    # base_lr is negative: the WeightedSum below adds LR * grad to each
    # param, so a negative rate presumably steps *down* the gradient --
    # TODO confirm against caffe2's LearningRate/WeightedSum semantics.
    LR = model.LearningRate(
        ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
    ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    for param in model.params:
        param_grad = model.param_to_grad[param]
        # In-place update: param <- param * 1 + param_grad * LR
        model.WeightedSum([param, ONE, param_grad, LR], param)
def Benchmark(model_gen, arg):
    """Build, initialize and time the model produced by *model_gen*.

    model_gen: callable taking the tensor order ("NCHW"/"NHWC") and
        returning (model, input_size).
    arg: parsed command-line namespace (see GetArgumentParser).

    Random data/label blobs are produced by the param init net so the
    net can run stand-alone; optionally adds the backward pass, runs
    warm-up iterations, then reports average per-iteration wall time.
    """
    model, input_size = model_gen(arg.order)
    model.Proto().type = arg.net_type
    model.Proto().num_workers = arg.num_workers

    # In order to be able to run everything without feeding more stuff, let's
    # add the data and label blobs to the parameter initialization net as well.
    if arg.order == "NCHW":
        input_shape = [arg.batch_size, 3, input_size, input_size]
    else:
        input_shape = [arg.batch_size, input_size, input_size, 3]
    if arg.model == "MLP":
        input_shape = [arg.batch_size, input_size]

    model.param_init_net.GaussianFill(
        [],
        "data",
        shape=input_shape,
        mean=0.0,
        std=1.0
    )
    model.param_init_net.UniformIntFill(
        [],
        "label",
        shape=[arg.batch_size, ],
        min=0,
        max=999
    )

    if arg.forward_only:
        print('{}: running forward only.'.format(arg.model))
    else:
        print('{}: running forward-backward.'.format(arg.model))
        model.AddGradientOperators(["loss"])
        AddParameterUpdate(model)
        if arg.order == 'NHWC':
            print(
                '==WARNING==\n'
                'NHWC order with CuDNN may not be supported yet, so I might\n'
                'exit suddenly.'
            )

    if not arg.cpu:
        model.param_init_net.RunAllOnGPU()
        model.net.RunAllOnGPU()

    if arg.dump_model:
        # Writes out the pbtxt for benchmarks on e.g. Android
        with open(
            "{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
        ) as fid:
            fid.write(str(model.param_init_net.Proto()))
        # FIX: the format string ignored a stray second argument
        # (arg.batch_size); the dead argument is dropped, keeping the
        # original output file name "{model}.pbtxt".
        with open("{0}.pbtxt".format(arg.model), "w") as fid:
            fid.write(str(model.net.Proto()))

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    # Warm-up runs (e.g. for cudnn algorithm selection) before timing.
    for _ in range(arg.warmup_iterations):
        workspace.RunNet(model.net.Proto().name)

    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations))
    start = time.time()
    workspace.RunPlan(plan)
    print('Spent: {}'.format((time.time() - start) / arg.iterations))
    if arg.layer_wise_benchmark:
        print('Layer-wise benchmark.')
        workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True)
def GetArgumentParser():
    """Build the command-line argument parser for the Caffe2 benchmark."""
    parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
    parser.add_argument("--batch_size", type=int, default=128,
                        help="The batch size.")
    parser.add_argument("--model", type=str, help="The model to benchmark.")
    parser.add_argument("--order", type=str, default="NCHW",
                        help="The order to evaluate.")
    parser.add_argument("--cudnn_ws", type=int, default=-1,
                        help="The cudnn workspace size.")
    parser.add_argument("--iterations", type=int, default=10,
                        help="Number of iterations to run the network.")
    parser.add_argument("--warmup_iterations", type=int, default=10,
                        help="Number of warm-up iterations before benchmarking.")
    parser.add_argument("--forward_only", action='store_true',
                        help="If set, only run the forward pass.")
    parser.add_argument("--layer_wise_benchmark", action='store_true',
                        help="If True, run the layer-wise benchmark as well.")
    parser.add_argument("--cpu", action='store_true',
                        help="If True, run testing on CPU instead of GPU.")
    parser.add_argument("--dump_model", action='store_true',
                        help="If True, dump the model prototxts to disk.")
    parser.add_argument("--net_type", type=str, default="dag")
    parser.add_argument("--num_workers", type=int, default=2)
    return parser
if __name__ == '__main__':
    args = GetArgumentParser().parse_args()
    if (
        not args.batch_size or not args.model or not args.order or
        not args.cudnn_ws
    ):
        GetArgumentParser().print_help()
        # FIX: previously execution fell through after printing the help
        # text and crashed later with KeyError(None) on model_map lookup.
        raise SystemExit(1)

    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    model_map = {
        'AlexNet': AlexNet,
        'OverFeat': OverFeat,
        'VGGA': VGGA,
        'Inception': Inception,
        'MLP': MLP,
    }
    Benchmark(model_map[args.model], args)
| apache-2.0 |
FireBladeNooT/Medusa_1_6 | lib/unidecode/x065.py | 252 | 4638 | data = (
'Pan ', # 0x00
'Yang ', # 0x01
'Lei ', # 0x02
'Sa ', # 0x03
'Shu ', # 0x04
'Zan ', # 0x05
'Nian ', # 0x06
'Xian ', # 0x07
'Jun ', # 0x08
'Huo ', # 0x09
'Li ', # 0x0a
'La ', # 0x0b
'Han ', # 0x0c
'Ying ', # 0x0d
'Lu ', # 0x0e
'Long ', # 0x0f
'Qian ', # 0x10
'Qian ', # 0x11
'Zan ', # 0x12
'Qian ', # 0x13
'Lan ', # 0x14
'San ', # 0x15
'Ying ', # 0x16
'Mei ', # 0x17
'Rang ', # 0x18
'Chan ', # 0x19
'[?] ', # 0x1a
'Cuan ', # 0x1b
'Xi ', # 0x1c
'She ', # 0x1d
'Luo ', # 0x1e
'Jun ', # 0x1f
'Mi ', # 0x20
'Li ', # 0x21
'Zan ', # 0x22
'Luan ', # 0x23
'Tan ', # 0x24
'Zuan ', # 0x25
'Li ', # 0x26
'Dian ', # 0x27
'Wa ', # 0x28
'Dang ', # 0x29
'Jiao ', # 0x2a
'Jue ', # 0x2b
'Lan ', # 0x2c
'Li ', # 0x2d
'Nang ', # 0x2e
'Zhi ', # 0x2f
'Gui ', # 0x30
'Gui ', # 0x31
'Qi ', # 0x32
'Xin ', # 0x33
'Pu ', # 0x34
'Sui ', # 0x35
'Shou ', # 0x36
'Kao ', # 0x37
'You ', # 0x38
'Gai ', # 0x39
'Yi ', # 0x3a
'Gong ', # 0x3b
'Gan ', # 0x3c
'Ban ', # 0x3d
'Fang ', # 0x3e
'Zheng ', # 0x3f
'Bo ', # 0x40
'Dian ', # 0x41
'Kou ', # 0x42
'Min ', # 0x43
'Wu ', # 0x44
'Gu ', # 0x45
'He ', # 0x46
'Ce ', # 0x47
'Xiao ', # 0x48
'Mi ', # 0x49
'Chu ', # 0x4a
'Ge ', # 0x4b
'Di ', # 0x4c
'Xu ', # 0x4d
'Jiao ', # 0x4e
'Min ', # 0x4f
'Chen ', # 0x50
'Jiu ', # 0x51
'Zhen ', # 0x52
'Duo ', # 0x53
'Yu ', # 0x54
'Chi ', # 0x55
'Ao ', # 0x56
'Bai ', # 0x57
'Xu ', # 0x58
'Jiao ', # 0x59
'Duo ', # 0x5a
'Lian ', # 0x5b
'Nie ', # 0x5c
'Bi ', # 0x5d
'Chang ', # 0x5e
'Dian ', # 0x5f
'Duo ', # 0x60
'Yi ', # 0x61
'Gan ', # 0x62
'San ', # 0x63
'Ke ', # 0x64
'Yan ', # 0x65
'Dun ', # 0x66
'Qi ', # 0x67
'Dou ', # 0x68
'Xiao ', # 0x69
'Duo ', # 0x6a
'Jiao ', # 0x6b
'Jing ', # 0x6c
'Yang ', # 0x6d
'Xia ', # 0x6e
'Min ', # 0x6f
'Shu ', # 0x70
'Ai ', # 0x71
'Qiao ', # 0x72
'Ai ', # 0x73
'Zheng ', # 0x74
'Di ', # 0x75
'Zhen ', # 0x76
'Fu ', # 0x77
'Shu ', # 0x78
'Liao ', # 0x79
'Qu ', # 0x7a
'Xiong ', # 0x7b
'Xi ', # 0x7c
'Jiao ', # 0x7d
'Sen ', # 0x7e
'Jiao ', # 0x7f
'Zhuo ', # 0x80
'Yi ', # 0x81
'Lian ', # 0x82
'Bi ', # 0x83
'Li ', # 0x84
'Xiao ', # 0x85
'Xiao ', # 0x86
'Wen ', # 0x87
'Xue ', # 0x88
'Qi ', # 0x89
'Qi ', # 0x8a
'Zhai ', # 0x8b
'Bin ', # 0x8c
'Jue ', # 0x8d
'Zhai ', # 0x8e
'[?] ', # 0x8f
'Fei ', # 0x90
'Ban ', # 0x91
'Ban ', # 0x92
'Lan ', # 0x93
'Yu ', # 0x94
'Lan ', # 0x95
'Wei ', # 0x96
'Dou ', # 0x97
'Sheng ', # 0x98
'Liao ', # 0x99
'Jia ', # 0x9a
'Hu ', # 0x9b
'Xie ', # 0x9c
'Jia ', # 0x9d
'Yu ', # 0x9e
'Zhen ', # 0x9f
'Jiao ', # 0xa0
'Wo ', # 0xa1
'Tou ', # 0xa2
'Chu ', # 0xa3
'Jin ', # 0xa4
'Chi ', # 0xa5
'Yin ', # 0xa6
'Fu ', # 0xa7
'Qiang ', # 0xa8
'Zhan ', # 0xa9
'Qu ', # 0xaa
'Zhuo ', # 0xab
'Zhan ', # 0xac
'Duan ', # 0xad
'Zhuo ', # 0xae
'Si ', # 0xaf
'Xin ', # 0xb0
'Zhuo ', # 0xb1
'Zhuo ', # 0xb2
'Qin ', # 0xb3
'Lin ', # 0xb4
'Zhuo ', # 0xb5
'Chu ', # 0xb6
'Duan ', # 0xb7
'Zhu ', # 0xb8
'Fang ', # 0xb9
'Xie ', # 0xba
'Hang ', # 0xbb
'Yu ', # 0xbc
'Shi ', # 0xbd
'Pei ', # 0xbe
'You ', # 0xbf
'Mye ', # 0xc0
'Pang ', # 0xc1
'Qi ', # 0xc2
'Zhan ', # 0xc3
'Mao ', # 0xc4
'Lu ', # 0xc5
'Pei ', # 0xc6
'Pi ', # 0xc7
'Liu ', # 0xc8
'Fu ', # 0xc9
'Fang ', # 0xca
'Xuan ', # 0xcb
'Jing ', # 0xcc
'Jing ', # 0xcd
'Ni ', # 0xce
'Zu ', # 0xcf
'Zhao ', # 0xd0
'Yi ', # 0xd1
'Liu ', # 0xd2
'Shao ', # 0xd3
'Jian ', # 0xd4
'Es ', # 0xd5
'Yi ', # 0xd6
'Qi ', # 0xd7
'Zhi ', # 0xd8
'Fan ', # 0xd9
'Piao ', # 0xda
'Fan ', # 0xdb
'Zhan ', # 0xdc
'Guai ', # 0xdd
'Sui ', # 0xde
'Yu ', # 0xdf
'Wu ', # 0xe0
'Ji ', # 0xe1
'Ji ', # 0xe2
'Ji ', # 0xe3
'Huo ', # 0xe4
'Ri ', # 0xe5
'Dan ', # 0xe6
'Jiu ', # 0xe7
'Zhi ', # 0xe8
'Zao ', # 0xe9
'Xie ', # 0xea
'Tiao ', # 0xeb
'Xun ', # 0xec
'Xu ', # 0xed
'Xu ', # 0xee
'Xu ', # 0xef
'Gan ', # 0xf0
'Han ', # 0xf1
'Tai ', # 0xf2
'Di ', # 0xf3
'Xu ', # 0xf4
'Chan ', # 0xf5
'Shi ', # 0xf6
'Kuang ', # 0xf7
'Yang ', # 0xf8
'Shi ', # 0xf9
'Wang ', # 0xfa
'Min ', # 0xfb
'Min ', # 0xfc
'Tun ', # 0xfd
'Chun ', # 0xfe
'Wu ', # 0xff
)
| gpl-3.0 |
helldorado/ansible | test/units/modules/network/nxos/test_nxos_feature.py | 68 | 3017 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_feature
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosFeatureModule(TestNxosModule):
    """Unit tests for the nxos_feature Ansible module.

    The module's network helpers (run_commands/load_config/
    get_capabilities) are patched so no device connection is needed.
    """

    module = nxos_feature

    def setUp(self):
        """Patch the module's network-facing helpers before each test."""
        super(TestNxosFeatureModule, self).setUp()

        self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_feature.run_commands')
        self.run_commands = self.mock_run_commands.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_feature.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_feature.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        self.get_capabilities.return_value = {'network_api': 'cliconf'}

    def tearDown(self):
        """Stop every patch started in setUp."""
        super(TestNxosFeatureModule, self).tearDown()
        self.mock_run_commands.stop()
        self.mock_load_config.stop()
        self.mock_get_capabilities.stop()

    def load_fixtures(self, commands=None, device=''):
        """Serve canned CLI output from fixture files.

        Each command maps to a file named after the command with spaces
        replaced by underscores (e.g. "show version" -> show_version.txt).
        """
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                try:
                    # Commands may arrive JSON-encoded; unwrap to get the
                    # plain command string, else use it verbatim.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                filename = '%s.txt' % str(command).replace(' ', '_')
                output.append(load_fixture('nxos_feature', filename))
            return output

        self.run_commands.side_effect = load_from_file
        self.load_config.return_value = None

    def test_nxos_feature_enable(self):
        """Enabling 'nve' must emit the 'feature nv overlay' command."""
        set_module_args(dict(feature='nve', state='enabled'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['terminal dont-ask', 'feature nv overlay'])

    def test_nxos_feature_disable(self):
        """Disabling 'ospf' must emit the matching 'no feature' command."""
        set_module_args(dict(feature='ospf', state='disabled'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['terminal dont-ask', 'no feature ospf'])
| gpl-3.0 |
alekstorm/backports.ssl | backports/ssl/monkey.py | 3 | 1918 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import sys
# borrowed largely from gevent 1.0
__all__ = ['patch']
if sys.version_info[0] >= 3:
string_types = str,
else:
import __builtin__
string_types = __builtin__.basestring
# maps module name -> attribute name -> original item
# e.g. "time" -> "sleep" -> built-in function sleep
saved = {}
def _get_original(name, items):
    """Return the pre-patch values of *items* from module *name*.

    Values stashed in ``saved`` (by patch_item/remove_item) win; anything
    not stashed is read from the freshly imported module itself.
    """
    stashed = saved.get(name, {})
    module = None
    originals = []
    for attr in items:
        if attr in stashed:
            originals.append(stashed[attr])
            continue
        # Import lazily, and only once, when a live lookup is needed.
        if module is None:
            module = __import__(name)
        originals.append(getattr(module, attr))
    return originals
def get_original(name, item):
    """Fetch original value(s): a single attr name returns one value,
    a sequence of names returns a list of values."""
    if not isinstance(item, string_types):
        return _get_original(name, item)
    return _get_original(name, [item])[0]
def patch_item(module, attr, newitem):
    """Set ``module.attr = newitem``, stashing any existing value in
    ``saved`` (first stash wins) so it can be recovered later."""
    sentinel = object()
    previous = getattr(module, attr, sentinel)
    if previous is not sentinel:
        saved.setdefault(module.__name__, {}).setdefault(attr, previous)
    setattr(module, attr, newitem)
def remove_item(module, attr):
    """Delete ``module.attr`` after stashing its value in ``saved``.

    A missing attribute is a silent no-op.
    """
    sentinel = object()
    current = getattr(module, attr, sentinel)
    if current is sentinel:
        return
    saved.setdefault(module.__name__, {}).setdefault(attr, current)
    delattr(module, attr)
def patch_module(name, items=None):
    """Patch a target module with attributes from backports.ssl.<name>.

    The target module defaults to *name* but may be overridden by the
    backport's ``__target__``; *items* defaults to the backport's
    ``__implements__`` list.
    """
    backport = getattr(__import__('backports').ssl, name)
    target_name = getattr(backport, '__target__', name)
    target = __import__(target_name)
    if items is None:
        items = getattr(backport, '__implements__', None)
        if items is None:
            raise AttributeError('%r does not have __implements__' % backport)
    for attr in items:
        patch_item(target, attr, getattr(backport, attr))
def patch():
    """Apply the 'core' backport via patch_module (presumably monkey-patches
    the stdlib ssl module -- determined by core.__target__; verify there)."""
    patch_module('core')
| mit |
s0hvaperuna/Not-a-bot | char_rnn/model.py | 1 | 5892 | import numpy as np
import tensorflow as tf
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import rnn
# Cloned from https://github.com/sherjilozair/char-rnn-tensorflow
# Used to sample trained models without having to call sample.py every time
# which is extremely slow. Instead we load the data once in utils.init_tf
# and use the data provided in the bot
class Model:
    """Character-level RNN language model.

    Stacked RNN/GRU/LSTM/NAS cells feed a softmax output layer over the
    character vocabulary; ``sample`` generates text one character at a
    time from a trained session.
    """

    def __init__(self, args, training=True):
        """Build the training (or sampling) graph.

        args: namespace carrying model, rnn_size, num_layers, batch_size,
            seq_length, vocab_size, grad_clip and the dropout keep-probs.
        training: when False, batch size and sequence length are forced
            to 1 so the graph can be sampled a character at a time.
        """
        self.args = args
        if not training:
            args.batch_size = 1
            args.seq_length = 1

        # choose different rnn cell
        if args.model == 'rnn':
            cell_fn = rnn.RNNCell
        elif args.model == 'gru':
            cell_fn = rnn.GRUCell
        elif args.model == 'lstm':
            cell_fn = rnn.LSTMCell
        elif args.model == 'nas':
            cell_fn = rnn.NASCell
        else:
            raise Exception("model type not supported: {}".format(args.model))

        # warp multi layered rnn cell into one cell with dropout
        cells = []
        for _ in range(args.num_layers):
            cell = cell_fn(args.rnn_size)
            # dropout only while training, and only when a keep-prob < 1
            if training and (args.output_keep_prob < 1.0 or args.input_keep_prob < 1.0):
                cell = rnn.DropoutWrapper(cell,
                                          input_keep_prob=args.input_keep_prob,
                                          output_keep_prob=args.output_keep_prob)
            cells.append(cell)
        self.cell = cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

        # input/target data (int32 since input is char-level)
        self.input_data = tf.placeholder(
            tf.int32, [args.batch_size, args.seq_length])
        self.targets = tf.placeholder(
            tf.int32, [args.batch_size, args.seq_length])
        self.initial_state = cell.zero_state(args.batch_size, tf.float32)

        # softmax output layer, use softmax to classify
        with tf.variable_scope('rnnlm'):
            softmax_w = tf.get_variable("softmax_w",
                                        [args.rnn_size, args.vocab_size])
            softmax_b = tf.get_variable("softmax_b", [args.vocab_size])

        # transform input to embedding
        embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, self.input_data)

        # dropout beta testing: double check which one should affect next line
        if training and args.output_keep_prob:
            inputs = tf.nn.dropout(inputs, args.output_keep_prob)

        # unstack the input to fits in rnn model
        inputs = tf.split(inputs, args.seq_length, 1)
        inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

        # loop function for rnn_decoder, which take the previous i-th cell's output and generate the (i+1)-th cell's input
        def loop(prev, _):
            prev = tf.matmul(prev, softmax_w) + softmax_b
            prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
            return tf.nn.embedding_lookup(embedding, prev_symbol)

        # rnn_decoder to generate the ouputs and final state. When we are not training the model, we use the loop function.
        outputs, last_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, cell, loop_function=loop if not training else None, scope='rnnlm')
        output = tf.reshape(tf.concat(outputs, 1), [-1, args.rnn_size])

        # output layer
        self.logits = tf.matmul(output, softmax_w) + softmax_b
        self.probs = tf.nn.softmax(self.logits)

        # loss is calculate by the log loss and taking the average.
        loss = legacy_seq2seq.sequence_loss_by_example(
            [self.logits],
            [tf.reshape(self.targets, [-1])],
            [tf.ones([args.batch_size * args.seq_length])])
        with tf.name_scope('cost'):
            self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
        self.final_state = last_state
        self.lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()

        # calculate gradients
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                          args.grad_clip)
        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(self.lr)

        # apply gradient change to the all the trainable variable.
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))

        # instrument tensorboard
        tf.summary.histogram('logits', self.logits)
        tf.summary.histogram('loss', loss)
        tf.summary.scalar('train_loss', self.cost)

    def sample(self, sess, chars, vocab, num=200, prime='The ', sampling_type=1):
        """Generate *num* characters from the trained model.

        sess: live tf.Session with trained weights; chars/vocab map ids
        to characters and back.  sampling_type: 0 = argmax, 1 = sample
        from the distribution, 2 = sample only at word boundaries.
        """
        # Warm the RNN state on the prime text (all but its last char).
        state = sess.run(self.cell.zero_state(1, tf.float32))
        for char in prime[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state: state}
            [state] = sess.run([self.final_state], feed)

        def weighted_pick(weights):
            # Draw an index proportionally to its (unnormalized) weight.
            t = np.cumsum(weights)
            s = np.sum(weights)
            return int(np.searchsorted(t, np.random.rand(1)*s))

        ret = prime
        char = prime[-1]
        for _ in range(num):
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state: state}
            [probs, state] = sess.run([self.probs, self.final_state], feed)
            p = probs[0]

            if sampling_type == 0:
                sample = np.argmax(p)
            elif sampling_type == 2:
                if char == ' ':
                    sample = weighted_pick(p)
                else:
                    sample = np.argmax(p)
            else:  # sampling_type == 1 default:
                sample = weighted_pick(p)

            pred = chars[sample]
            ret += pred
            char = pred
        return ret
| mit |
prat0318/bravado-core | tests/conftest.py | 3 | 2118 | import base64
import os
import simplejson as json
import pytest
import bravado_core.formatter
from bravado_core.spec import Spec
@pytest.fixture
def empty_swagger_spec():
    """A Spec backed by an empty swagger dict."""
    spec_dict = {}
    return Spec(spec_dict=spec_dict)
@pytest.fixture
def minimal_swagger_dict():
    """Smallest dict that still qualifies as a swagger spec - useful as a
    base template for tests.
    """
    spec = {
        'swagger': '2.0',
        'info': {'title': 'Test', 'version': '1.0'},
    }
    spec['paths'] = {}
    spec['definitions'] = {}
    return spec
@pytest.fixture
def minimal_swagger_spec(minimal_swagger_dict):
    """Spec object built from the minimal_swagger_dict fixture."""
    return Spec.from_dict(minimal_swagger_dict)
@pytest.fixture
def petstore_dict():
    """Parsed petstore swagger.json from the repo's test-data directory."""
    here = os.path.abspath(os.path.dirname(__file__))
    spec_path = os.path.join(here, '../test-data/2.0/petstore/swagger.json')
    with open(spec_path) as spec_file:
        return json.load(spec_file)
@pytest.fixture
def petstore_spec(petstore_dict):
    """Spec object built from the petstore_dict fixture."""
    return Spec.from_dict(petstore_dict)
def del_base64():
    """Finalizer: unregister the 'base64' format added during a test."""
    del bravado_core.formatter.DEFAULT_FORMATS['base64']
@pytest.fixture
def base64_format():
    """SwaggerFormat codec for a user-defined 'base64' string format."""
    return bravado_core.formatter.SwaggerFormat(
        format='base64',
        to_wire=base64.b64encode,
        to_python=base64.b64decode,
        validate=base64.b64decode,
        description='Base64')
@pytest.fixture(scope='function')
def register_base64_format(base64_format, request):
    """Register the base64 format for one test; unregisters on teardown."""
    request.addfinalizer(del_base64)
    bravado_core.formatter.register_format(base64_format)
@pytest.fixture
def node_spec():
    """Self-referential 'Node' schema used in tests with recursive $refs."""
    properties = {
        'name': {'type': 'string'},
        'child': {'$ref': '#/definitions/Node'},
    }
    return {
        'type': 'object',
        'properties': properties,
        'required': ['name'],
    }
@pytest.fixture
def recursive_swagger_spec(minimal_swagger_dict, node_spec):
    """
    Return a swager_spec with a #/definitions/Node that is
    recursive.

    NOTE(review): uses the Spec(...) constructor rather than
    Spec.from_dict as the other fixtures do -- confirm this is intended.
    """
    minimal_swagger_dict['definitions']['Node'] = node_spec
    return Spec(minimal_swagger_dict)
| bsd-3-clause |
morrisonwudi/zipline | zipline/errors.py | 13 | 11028 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ZiplineError(Exception):
    """Base class for zipline exceptions.

    Subclasses set ``msg`` to a ``str.format`` template; keyword
    arguments passed at construction fill the template.
    """

    msg = None

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        # Rendered eagerly so ``message`` mirrors str(self) (Python 3
        # dropped the implicit Exception.message attribute).
        self.message = str(self)

    def __str__(self):
        return self.msg.format(**self.kwargs)

    __unicode__ = __str__
    __repr__ = __str__
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
    """
    Raised if a user script calls the override_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage.
    """
    msg = """
You attempted to override slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class OverrideSlippagePostInit(ZiplineError):
# Raised if a users script calls override_slippage magic
# after the initialize method has returned.
msg = """
You attempted to override slippage outside of `initialize`. \
You may only call override_slippage in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the override_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to override commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class OverrideCommissionPostInit(ZiplineError):
"""
Raised if a users script calls override_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call override_commission in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class MultipleSymbolsFound(ZiplineError):
    """
    Raised when a symbol() call contains a symbol that changed over
    time and is thus not resolvable without additional information
    provided via as_of_date.
    """
    # FIX: the user-facing message previously read "Use the as_of_date'
    # argument to to specify" -- stray quote and duplicated "to" removed.
    msg = """
Multiple symbols with the name '{symbol}' found. Use the
'as_of_date' argument to specify when the date symbol-lookup
should be valid.
Possible options:{options}
""".strip()
class SymbolNotFound(ZiplineError):
    """
    Raised when a symbol() call contains a non-existent symbol.
    """
    msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
    """
    Raised when a lookup_future_chain() call contains a non-existent symbol.
    """
    msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class SidNotFound(ZiplineError):
"""
Raised when a retrieve_asset() call contains a non-existent sid.
"""
msg = """
Asset with sid '{sid}' was not found.
""".strip()
class InvalidAssetType(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset with an invalid
AssetType.
"""
msg = """
AssetMetaData contained an invalid Asset type: '{asset_type}'.
""".strip()
class UpdateAssetFinderTypeError(ZiplineError):
"""
Raised when TradingEnvironment.update_asset_finder() gets an asset_finder
arg that is not of AssetFinder class.
"""
msg = """
TradingEnvironment can not set asset_finder to object of class {cls}.
""".strip()
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder can not map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class InputTermNotAtomic(ZiplineError):
"""
Raised when a non-atomic term is specified as an input to an FFC term with
a lookback window.
"""
msg = (
"Can't compute {parent} with non-atomic input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and 100.0, and min must be "
"less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AddTermPostInit(ZiplineError):
"""
Raised when a user tries to call add_{filter,factor,classifier}
outside of initialize.
"""
msg = (
"Attempted to add a new filter, factor, or classifier "
"outside of initialize.\n"
"New FFC terms may only be added during initialize."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by FFC CustomFactors with unsupported dtypes.
"""
msg = "CustomFactors with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = '{msg}'
| apache-2.0 |
solos/pylons | pylons/util.py | 1 | 8988 | """Paste Template and Pylons utility functions
PylonsTemplate is a Paste Template sub-class that configures the source
directory and default plug-ins for a new Pylons project. The minimal
template a more minimal template with less additional directories and
layout.
The functions used in this module are to assist Pylons in creating new
projects, and handling deprecation warnings for moved Pylons functions.
"""
import logging
import sys
import warnings
import pkg_resources
from paste.deploy.converters import asbool
from paste.script.appinstall import Installer
from paste.script.templates import Template, var
from tempita import paste_script_template_renderer
import pylons
import pylons.configuration
import pylons.i18n
__all__ = ['AttribSafeContextObj', 'ContextObj', 'PylonsContext',
'class_name_from_module_name', 'call_wsgi_application']
pylons_log = logging.getLogger(__name__)
def func_move(name, moved_to='pylons.i18n'):
    """Build the deprecation message for a function relocated to *moved_to*."""
    template = ("The %s function has moved to %s, please update your import "
                "statements to reflect the move")
    return template % (name, moved_to)
def deprecated(func, message):
    """Wrap *func* so every call emits *message* as a DeprecationWarning.

    The wrapper keeps the wrapped function's name (when assignable) and
    prepends the deprecation message to its docstring.
    """
    def wrapper(*args, **kwargs):
        warnings.warn(message, DeprecationWarning, 2)
        return func(*args, **kwargs)
    try:
        wrapper.__name__ = func.__name__
    except TypeError:  # __name__ not assignable on Python < 2.4
        pass
    wrapper.__doc__ = "%s\n\n%s" % (message, func.__doc__)
    return wrapper
get_lang = deprecated(pylons.i18n.get_lang, func_move('get_lang'))
set_lang = deprecated(pylons.i18n.set_lang, func_move('set_lang'))
_ = deprecated(pylons.i18n._, func_move('_'))
# Avoid circular import and a double warning
def log(*args, **kwargs):
    """Deprecated: Use the logging module instead.

    Log a message to the output log.
    """
    # Imported inside the function (not at module level) to avoid a
    # circular import between this module and pylons.helpers.
    import pylons.helpers
    return pylons.helpers.log(*args, **kwargs)
def get_prefix(environ, warn=True):
    """Deprecated: Use environ.get('SCRIPT_NAME', '') instead"""
    if warn:
        warnings.warn("The get_prefix function is deprecated, please use "
                      "environ.get('SCRIPT_NAME', '') instead",
                      DeprecationWarning, 2)
    # Prefer an explicitly configured prefix; fall back to the WSGI
    # SCRIPT_NAME from the request environ when none is configured.
    prefix = pylons.config.get('prefix', '')
    if not prefix:
        if environ.get('SCRIPT_NAME', '') != '':
            prefix = environ['SCRIPT_NAME']
    return prefix
def call_wsgi_application(application, environ, catch_exc_info=False):
    """
    Call the given WSGI application, returning ``(status_string,
    headerlist, app_iter)``

    Be sure to call ``app_iter.close()`` if it's there.

    If catch_exc_info is true, then returns ``(status_string,
    headerlist, app_iter, exc_info)``, where the fourth item may
    be None, but won't be if there was an exception.  If you don't
    do this and there was an exception, the exception will be
    raised directly.
    """
    captured = []
    output = []
    def start_response(status, headers, exc_info=None):
        if exc_info is not None and not catch_exc_info:
            # Python 2 three-argument raise: re-raise with the original
            # traceback when the caller did not ask to capture it.
            raise exc_info[0], exc_info[1], exc_info[2]
        captured[:] = [status, headers, exc_info]
        # Per the WSGI spec, start_response returns a write() callable.
        return output.append
    app_iter = application(environ, start_response)
    if not captured or output:
        # Either the app used the write() callable, or it calls
        # start_response lazily; exhaust the iterator so `captured` is
        # guaranteed to be populated before we read from it.
        try:
            output.extend(app_iter)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        app_iter = output
    if catch_exc_info:
        return (captured[0], captured[1], app_iter, captured[2])
    else:
        return (captured[0], captured[1], app_iter)
def class_name_from_module_name(module_name):
    """Derive a CamelCase class name from *module_name*.

    Dashes are treated like underscores; each underscore-separated word is
    capitalised and the words are concatenated::

        >>> class_name_from_module_name('with-dashes')
        'WithDashes'
        >>> class_name_from_module_name('with_underscores')
        'WithUnderscores'
        >>> class_name_from_module_name('oneword')
        'Oneword'
    """
    normalized = module_name.replace('-', '_')
    return ''.join(word.title() for word in normalized.split('_'))
class PylonsContext(object):
    """Pylons context object

    All the Pylons Stacked Object Proxies are also stored here, for use
    in generators and async based operation where the globals can't be
    used.

    This object is attached in
    :class:`~pylons.controllers.core.WSGIController` instances as
    :attr:`~WSGIController._py_object`. For example::

        class MyController(WSGIController):
            def index(self):
                pyobj = self._py_object
                return "Environ is %s" % pyobj.request.environ
    """
    # Intentionally empty: attributes are attached dynamically per request.
    pass
class ContextObj(object):
    """Strict :term:`tmpl_context` object: accessing an attribute that was
    never set raises an exception (normal Python attribute semantics)."""
    def __repr__(self):
        # Show only public attributes, sorted by name, with long value
        # reprs truncated so the output stays readable.
        public = sorted(
            (name, value) for name, value in self.__dict__.items()
            if not name.startswith('_'))
        parts = []
        for name, value in public:
            shown = repr(value)
            if len(shown) > 70:
                shown = shown[:60] + '...' + shown[-5:]
            parts.append(' %s=%s' % (name, shown))
        return '<%s.%s at %s%s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            hex(id(self)),
            ','.join(parts))
class AttribSafeContextObj(ContextObj):
    """The :term:`tmpl_context` object, with lax attribute access (
    returns '' when the attribute does not exist)"""
    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup fails, so
        # this mostly serves to log the miss and substitute the '' default.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            pylons_log.debug("No attribute called %s found on c object, "
                             "returning empty string", name)
            return ''
class PylonsTemplate(Template):
    """Paste template for a full Pylons project layout."""
    _template_dir = ('pylons', 'templates/default_project')
    template_renderer = staticmethod(paste_script_template_renderer)
    summary = 'Pylons application template'
    egg_plugins = ['PasteScript', 'Pylons']
    # Questions asked on the command line when the project is created.
    vars = [
        var('template_engine', 'mako/genshi/jinja2/etc: Template language',
            default='mako'),
        var('sqlalchemy', 'True/False: Include SQLAlchemy 0.5 configuration',
            default=False),
    ]
    # Variables guaranteed to exist (possibly empty) for the templates.
    ensure_names = ['description', 'author', 'author_email', 'url']
    def pre(self, command, output_dir, vars):
        """Called before template is applied.

        Normalizes the ``vars`` dict in place: logger name, template
        engine, Babel extractor snippet, and defaults for setup.py fields.
        """
        package_logger = vars['package']
        if package_logger == 'root':
            # Rename the app logger in the rare case a project is named 'root'
            package_logger = 'app'
        vars['package_logger'] = package_logger
        template_engine = \
            vars.setdefault('template_engine',
                            pylons.configuration.default_template_engine)
        if template_engine == 'mako':
            # Support a Babel extractor default for Mako
            vars['babel_templates_extractor'] = \
                ("('templates/**.mako', 'mako', {'input_encoding': 'utf-8'})"
                 ",\n%s#%s" % (' ' * 4, ' ' * 8))
        else:
            vars['babel_templates_extractor'] = ''
        # Ensure these exist in the namespace
        for name in self.ensure_names:
            vars.setdefault(name, '')
        vars['version'] = vars.get('version', '0.1')
        vars['zip_safe'] = asbool(vars.get('zip_safe', 'false'))
        vars['sqlalchemy'] = asbool(vars.get('sqlalchemy', 'false'))
class MinimalPylonsTemplate(PylonsTemplate):
    """Paste template for a stripped-down Pylons project layout."""
    _template_dir = ('pylons', 'templates/minimal_project')
    summary = 'Pylons minimal application template'
    # Only the template engine is asked for; no SQLAlchemy option here.
    vars = [
        var('template_engine', 'mako/genshi/jinja2/etc: Template language',
            default='mako'),
    ]
class PylonsInstaller(Installer):
    """Paste installer that renders a deployment config for the package."""
    use_cheetah = False
    config_file = 'config/deployment.ini_tmpl'
    def config_content(self, command, vars):
        """
        Called by ``self.write_config``, this returns the text content
        for the config file, given the provided variables.
        """
        # Candidate top-level modules, read from the egg metadata.
        modules = [line.strip()
                   for line in self.dist.get_metadata_lines('top_level.txt')
                   if line.strip() and not line.strip().startswith('#')]
        if not modules:
            print >> sys.stderr, 'No modules are listed in top_level.txt'
            print >> sys.stderr, \
                'Try running python setup.py egg_info to regenerate that file'
        # Render the first module that ships the deployment template.
        for module in modules:
            if pkg_resources.resource_exists(module, self.config_file):
                return self.template_renderer(
                    pkg_resources.resource_string(module, self.config_file),
                    vars, filename=self.config_file)
        # Legacy support for the old location in egg-info
        return super(PylonsInstaller, self).config_content(command, vars)
| bsd-3-clause |
kaarolch/ansible | lib/ansible/modules/network/illumos/ipadm_prop.py | 48 | 7495 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipadm_prop
short_description: Manage protocol properties on Solaris/illumos systems.
description:
- Modify protocol properties on Solaris/illumos systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
protocol:
description:
- Specifies the procotol for which we want to manage properties.
required: true
property:
description:
- Specifies the name of property we want to manage.
required: true
value:
description:
- Specifies the value we want to set for the property.
required: false
temporary:
description:
- Specifies that the property value is temporary. Temporary
property values do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Set or reset the property value.
required: false
default: present
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
# Set TCP receive buffer size
ipadm_prop: protocol=tcp property=recv_buf value=65536
# Reset UDP send buffer size to the default value
ipadm_prop: protocol=udp property=send_buf state=reset
'''
RETURN = '''
protocol:
description: property's protocol
returned: always
type: string
sample: "TCP"
property:
description: name of the property
returned: always
type: string
sample: "recv_maxbuf"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: property's persistence
returned: always
type: boolean
sample: "True"
value:
description: value of the property
returned: always
type: int/string (depends on property)
sample: 1024/never
'''
SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp']
class Prop(object):
    """State wrapper around the illumos ``ipadm`` utility for a single
    protocol property.

    Queries go through ``ipadm show-prop``; changes go through
    ``ipadm set-prop`` / ``ipadm reset-prop``.  All commands run via the
    Ansible module helper, so each action method returns the usual
    ``(rc, stdout, stderr)`` triple from ``run_command``.
    """

    def __init__(self, module):
        self.module = module
        # Cache the validated module parameters for convenient access.
        self.protocol = module.params['protocol']
        self.property = module.params['property']
        self.value = module.params['value']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def _show_cmd(self, fields=None):
        """Build an ``ipadm show-prop`` invocation for this property.

        When *fields* is given, request machine-parsable output (``-c``)
        restricted to that comma-separated field list.
        """
        cmd = [self.module.get_bin_path('ipadm'), 'show-prop']
        if fields is not None:
            cmd.extend(['-c', '-o', fields])
        cmd.extend(['-p', self.property, self.protocol])
        return cmd

    def _change_cmd(self, subcommand, prop_arg):
        """Build a ``set-prop``/``reset-prop`` command, honoring the
        ``temporary`` flag (``-t`` = do not persist across reboots)."""
        cmd = [self.module.get_bin_path('ipadm'), subcommand]
        if self.temporary:
            cmd.append('-t')
        cmd.extend(['-p', prop_arg, self.protocol])
        return cmd

    def property_exists(self):
        """Return True if ipadm knows the property; otherwise fail the
        module (which exits) with a descriptive message."""
        (rc, _, _) = self.module.run_command(self._show_cmd())
        if rc == 0:
            return True
        self.module.fail_json(msg='Unknown property "%s" for protocol %s' %
                              (self.property, self.protocol),
                              protocol=self.protocol,
                              property=self.property)

    def property_is_modified(self):
        """Return True when the current value still equals the default.

        NOTE(review): the name reads inverted -- True means *unmodified*
        (current == default).  ``main()`` depends on this sense, so the
        name is kept for compatibility.
        """
        (rc, out, _) = self.module.run_command(
            self._show_cmd('current,default'))
        # Parsable output is "current:default"; values are assumed not to
        # contain ':' themselves.
        (value, default) = out.rstrip().split(':')
        return rc == 0 and value == default

    def property_is_set(self):
        """Return True when the current value already equals the
        requested value (so no change is needed)."""
        (rc, out, _) = self.module.run_command(self._show_cmd('current'))
        return rc == 0 and self.value == out.rstrip()

    def set_property(self):
        """Apply the requested value; returns (rc, stdout, stderr)."""
        return self.module.run_command(
            self._change_cmd('set-prop', self.property + "=" + self.value))

    def reset_property(self):
        """Reset the property to its default; returns (rc, stdout, stderr)."""
        return self.module.run_command(
            self._change_cmd('reset-prop', self.property))
def main():
    """Module entry point: drive ipadm to reach the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS),
            property=dict(required=True),
            value=dict(required=False),
            temporary=dict(default=False, type='bool'),
            state=dict(
                default='present', choices=['absent', 'present', 'reset']),
        ),
        supports_check_mode=True
    )
    prop = Prop(module)
    # rc stays None when no command was executed (used below for 'changed').
    rc = None
    out = ''
    err = ''
    result = {}
    result['protocol'] = prop.protocol
    result['property'] = prop.property
    result['state'] = prop.state
    result['temporary'] = prop.temporary
    if prop.value:
        result['value'] = prop.value
    if prop.state == 'absent' or prop.state == 'reset':
        if prop.property_exists():
            # property_is_modified() is True when current == default, so a
            # reset is only issued for properties that differ from default.
            if not prop.property_is_modified():
                if module.check_mode:
                    module.exit_json(changed=True)
                (rc, out, err) = prop.reset_property()
                if rc != 0:
                    module.fail_json(protocol=prop.protocol,
                                     property=prop.property,
                                     msg=err,
                                     rc=rc)
    elif prop.state == 'present':
        if prop.value is None:
            module.fail_json(msg='Value is mandatory with state "present"')
        if prop.property_exists():
            # Only run set-prop when the current value differs.
            if not prop.property_is_set():
                if module.check_mode:
                    module.exit_json(changed=True)
                (rc, out, err) = prop.set_property()
                if rc != 0:
                    module.fail_json(protocol=prop.protocol,
                                     property=prop.property,
                                     msg=err,
                                     rc=rc)
    # A command actually ran iff rc was assigned above.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    module.exit_json(**result)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cpaulik/pyscaffold | src/pyscaffold/api/helpers.py | 1 | 10567 | # -*- coding: utf-8 -*-
"""
Useful functions for manipulating the action list and project structure.
"""
from __future__ import absolute_import
from copy import deepcopy
from ..contrib.six import string_types
from ..exceptions import ActionNotFound
from ..log import logger
from ..structure import FileOp, define_structure
logger = logger # Sphinx workaround to force documenting imported members
"""Logger wrapper, that provides methods like :obj:`~.ReportLogger.report`.
See :class:`~.ReportLogger`.
"""
NO_OVERWRITE = FileOp.NO_OVERWRITE
"""Do not overwrite an existing file during update
(still created if not exists)
"""
NO_CREATE = FileOp.NO_CREATE
"""Do not create the file during an update"""
# -------- Project Structure --------
def ensure(struct, path, content=None, update_rule=None):
    """Return a copy of *struct* guaranteed to contain the file at *path*.

    Parent directories are created as needed.  *path* may be a
    ``/``-separated string or a list of path segments ordered from the
    structure root; ``'docs/api/index.html'`` and
    ``['docs', 'api', 'index.html']`` are equivalent.

    ``content`` is the file text (use ``''`` to force an empty file) and
    ``update_rule`` is the :class:`~.FileOp` applied on update
    (``None`` by default).

    Returns the updated project tree representation as a new dict; the
    input *struct* is not modified.
    """
    segments = path.split('/') if isinstance(path, string_types) else path
    root = deepcopy(struct)
    node = root
    # Descend to the file's directory, creating missing parents.
    for segment in segments[:-1]:
        if segment not in node:
            node[segment] = {}
        node = node[segment]
    leaf = segments[-1]
    previous = node.get(leaf, (None, None))
    node[leaf] = _merge_file_leaf(previous, (content, update_rule))
    return root
def reject(struct, path):
    """Return a copy of *struct* with the file at *path* removed, if present.

    *path* follows the same conventions as :obj:`~.ensure`: either a
    ``/``-separated string or a list of segments ordered from the
    structure root.  A missing file (or missing ancestor directory) is
    not an error; the copy is simply returned unchanged.
    """
    segments = path.split('/') if isinstance(path, string_types) else path
    root = deepcopy(struct)
    node = root
    for segment in segments[:-1]:
        if segment not in node:
            # An absent ancestor means there is nothing to remove.
            return root
        node = node[segment]
    target = segments[-1]
    if target in node:
        del node[target]
    return root
def merge(old, new):
    """Merge two dict representations for the directory structure.

    Basically a deep dictionary merge, except from the leaf update method.

    Args:
        old (dict): directory descriptor that takes low precedence
                    during the merge
        new (dict): directory descriptor that takes high precedence
                    during the merge

    The directory tree is represented as a (possibly nested) dictionary.
    The keys indicate the path where a file will be generated, while the
    value indicates the content.  Additionally, tuple values are allowed in
    order to specify the rule that will be followed during an ``update``
    operation (see :class:`~.FileOp`).  In this case, the first element is
    the file content and the second element is the update rule.  For
    example, the dictionary::

        {'project': {
            'namespace': {
                'module.py': ('print("Hello World!")',
                              helpers.NO_OVERWRITE)}}

    represents a ``project/namespace/module.py`` file with content
    ``print("Hello World!")``, that will be created only if not
    present.

    Returns:
        dict: resulting merged directory representation

    Note:
        Use an empty string as content to ensure a file is created empty.
    """
    # Copy the low-precedence tree so neither argument is mutated.
    return _inplace_merge(deepcopy(old), new)
def _inplace_merge(old, new):
    """Similar to :obj:`~.merge` but modifies the first dict."""
    for key, value in new.items():
        old_value = old.get(key, None)
        new_is_dict = isinstance(value, dict)
        old_is_dict = isinstance(old_value, dict)
        if new_is_dict and old_is_dict:
            # Both sides are directories: recurse to merge their contents.
            old[key] = _inplace_merge(old_value, value)
        elif old_value is not None and not new_is_dict and not old_is_dict:
            # both are defined and final leaves
            old[key] = _merge_file_leaf(old_value, value)
        else:
            # Only one side is meaningful (or a file replaces a directory):
            # the high-precedence value wins wholesale.
            old[key] = deepcopy(value)
    return old
def _merge_file_leaf(old_value, new_value):
"""Merge leaf values for the directory tree representation.
The leaf value is expected to be a tuple ``(content, update_rule)``.
When a string is passed, it is assumed to be the content and
``None`` is used for the update rule.
Args:
old_value (tuple or str): descriptor for the file that takes low
precedence during the merge
new_value (tuple or str): descriptor for the file that takes high
precedence during the merge
Note:
``None`` contents are ignored, use and empty string to force empty
contents.
Returns:
tuple or str: resulting value for the merged leaf
"""
if not isinstance(old_value, (list, tuple)):
old_value = (old_value, None)
if not isinstance(new_value, (list, tuple)):
new_value = (new_value, None)
content = new_value[0] if new_value[0] is not None else old_value[0]
rule = new_value[1] if new_value[1] is not None else old_value[1]
if rule is None:
return content
return (content, rule)
# -------- Action List --------
def register(actions, action, before=None, after=None):
    """Insert *action* into the scaffold *actions* pipeline.

    ``action`` is a callable taking ``(struct, opts)`` -- the (possibly
    nested) project-structure dict and the scaffold options dict -- and
    returning an equivalent ``(struct, opts)`` tuple.

    The position is controlled with ``before``/``after``, whose value
    names an existing action either as ``'function_name'`` or, to reduce
    the chance of name clashes, ``'module.path:function_name'``.  When a
    name matches several registered actions, the first match is used.
    With neither keyword given, the action is placed right after
    ``define_structure`` -- i.e. once the structure is defined but before
    it is written to disk.

    Returns:
        list: a new action list; the input list is not modified.
    """
    anchor = before or after or get_id(define_structure)
    index = _find(actions, anchor)
    if not before:
        index += 1
    updated = list(actions)
    updated.insert(index, action)
    return updated
def unregister(actions, reference):
    """Return a copy of *actions* without the action named *reference*.

    ``reference`` is either ``'function_name'`` or
    ``'module.path:function_name'``, exactly as accepted by the keyword
    arguments of :obj:`~.register`.

    Returns:
        list: a new action list; the input list is not modified.
    """
    index = _find(actions, reference)
    remaining = list(actions)
    del remaining[index]
    return remaining
def get_id(function):
    """Return the ``'<module name>:<function name>'`` identifier of *function*.

    This mirrors the setuptools entry-point convention.  Unlike Python 3's
    ``__qualname__``, any enclosing function or class names are ignored.

    Args:
        function (callable): function object

    Returns:
        str: identifier
    """
    return '%s:%s' % (function.__module__, function.__name__)
def _find(actions, name):
    """Find index of name in actions"""
    # A ':' means the reference is fully qualified ('module:function'),
    # so compare against full identifiers; otherwise match bare names.
    if ':' in name:
        names = [get_id(action) for action in actions]
    else:
        names = [action.__name__ for action in actions]
    try:
        return names.index(name)
    except ValueError:
        # Translate the generic lookup failure into the package exception.
        raise ActionNotFound(name)
| mit |
boddulavineela/mase | python101/code/widget_demo.py | 14 | 3669 | """Solution to an exercise from
Think Python: An Introduction to Software Design
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
This program requires Gui.py, which is part of
Swampy; you can download it from thinkpython.com/swampy.
This program demonstrates how to use the Gui module
to create and operate on Tkinter widgets.
The documentation for the widgets is at
http://www.pythonware.com/library/tkinter/introduction/
"""
from swampy.Gui import *
# create the Gui: the debug flag makes the frames visible
g = Gui(debug=False)
# the topmost structure is a row of widgets
g.row()
# FRAME 1
# the first frame is a column of widgets
g.col()
# la is for label
la1 = g.la(text='This is a label.')
# en is for entry
en = g.en()
en.insert(END, 'This is an entry widget.')
la2 = g.la(text='')
def press_me():
    """this callback gets invoked when the user presses the button"""
    # Mirror the entry widget's current text onto the second label.
    text = en.get()
    la2.configure(text=text)
# bu is for button
bu = g.bu(text='Press me', command=press_me)
# end of the first frame
g.endcol()
# FRAME 2
g.col()
# ca is for canvas
ca = g.ca(width=200, height=200)
item1 = ca.circle([0, 0], 70, 'red')
item2 = ca.rectangle([[0, 0], [60, 60]], 'blue')
item3 = ca.text([0, 0], 'This is a canvas.', 'white')
# mb is for menubutton
mb = g.mb(text='Choose a color')
def set_color(color):
    """Fill the canvas rectangle (item2) with the given color name."""
    ca.itemconfig(item2, fill=color)
# mi is for menuitem
for color in ['red', 'green', 'blue']:
# Callable is an object that can be used like a function
g.mi(mb, color, command=Callable(set_color, color))
g.endcol()
# FRAME 3
g.col()
def get_selection():
    """Return the color name currently selected in the listbox, or None."""
    t = lb.curselection()
    try:
        index = int(t[0])
        color = lb.get(index)
        return color
    except:
        # NOTE(review): bare except is meant to swallow IndexError/ValueError
        # when nothing is selected, but it also hides real errors.
        return None
def print_selection(event):
    """Listbox click handler: echo the selected color to stdout."""
    print get_selection()
def apply_color():
    """Fill the canvas circle (item1) with the selected listbox color."""
    color = get_selection()
    if color:
        ca.itemconfig(item1, fill=color)
la = g.la(text='List of colors:')
g.row()
# lb is for listbox
lb = g.lb()
lb.bind('<ButtonRelease-1>', print_selection)
# sb is for scrollbar
sb = g.sb()
g.endrow()
bu = g.bu(text='Apply color', command=apply_color)
g.endcol()
# fill the listbox with color names
fp = open('/etc/X11/rgb.txt')
fp.readline()
for line in fp:
t = line.split('\t')
name = t[2].strip()
lb.insert(END, name)
# tell the listbox and the scrollbar about each other
lb.configure(yscrollcommand=sb.set)
sb.configure(command=lb.yview)
# FRAME 4
g.col()
# te is for text entry
te = g.te(height=5, width=40)
te.insert(END, "This is a Text widget.\n")
te.insert(END, "It's like a little text editor.\n")
te.insert(END, "It has more than one line, unlike an Entry widget.\n")
# st is for scrollable text
st = g.st()
st.text.configure(height=5, width=40)
st.text.insert(END, "This is a Scrollable Text widget.\n")
st.text.insert(END, "It is defined in Gui.py\n")
for i in range(100):
st.text.insert(END, "All work and no play.\n")
g.endcol()
# FRAME 5
# gr is for grid: start a grid with three columns
# the rweights control how extra space is divided among the rows
g.gr(3, rweights=[1,1,1])
for i in range(1, 10):
g.bu(text=str(i))
g.endgr()
# FRAME 6
g.col()
def print_var(obj):
    """Print the current value of the widget's attached control variable."""
    print obj.var.get()
g.la(text='Font:')
fontsize = IntVar()
# rb is for radiobutton
for size in [10, 12, 14, 16, 18]:
rb = g.rb(text=str(size), variable=fontsize, value=size)
rb.configure(command=Callable(print_var, rb))
# cb is for checkbutton
b1 = g.cb(text='Bold')
b1.configure(command=Callable(print_var, b1))
b2 = g.cb(text='Italic')
b2.configure(command=Callable(print_var, b2))
g.endcol()
g.mainloop()
| unlicense |
slaughterjames/static | modules/readelf.py | 1 | 1610 | #python imports
import sys
import os
import subprocess
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Uses readelf to pull the header information from an ELF file
***END DESCRIPTION***
'''
def POE(logdir, target, logging, debug):
    """Run ``readelf -h`` against target.filename and save the output.

    Writes the captured header dump to ``<logdir>Readelf.txt`` and, when
    *logging* is enabled, records HTML log entries via the logger class.
    Returns 0 on success, -1 when the output file cannot be written.
    """
    if (logging == True):
        LOG = logger()
        newlogentry = ''
    readelf_dump = ''
    readelf_output_data = ''
    output = logdir + 'Readelf.txt'
    FI = fileio()
    if (logging == True):
        newlogentry = 'Running readelf against: <strong>' + target.filename + '</strong>'
        LOG.WriteLog(logdir, target.filename, newlogentry)
    # NOTE(review): shell=True with an unquoted filename is a command
    # injection risk if target.filename is attacker-controlled.
    subproc = subprocess.Popen('readelf -h ' + target.filename, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for readelf_data in subproc.stdout.readlines():
        readelf_output_data += readelf_data
        if (debug == True):
            print readelf_data
    try:
        FI.WriteLogFile(output, readelf_output_data)
        print '[*] Readelf data had been written to file here: ' + output
        if (logging == True):
            newlogentry = 'Readelf file has been generated to file here: <a href=\"' + output + '\"> Readelf Output </a>'
            LOG.WriteLog(logdir, target.filename, newlogentry)
    except:
        # NOTE(review): bare except hides the underlying I/O error.
        print '[x] Unable to write readelf data to file'
        if (logging == True):
            newlogentry = 'Unable to write readelf data to file'
            LOG.WriteLog(logdir, target.filename, newlogentry)
        return -1
    return 0
| gpl-2.0 |
Mellthas/quodlibet | quodlibet/tests/test_qltk_tracker.py | 2 | 3475 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import shutil
from tests import TestCase, mkdtemp
from gi.repository import Gtk
from quodlibet import config
from quodlibet.formats import AudioFile
from quodlibet.player.nullbe import NullPlayer
from quodlibet.qltk.tracker import SongTracker, FSInterface
from quodlibet.library import SongLibrary
class TSongTracker(TestCase):
    """Tests for SongTracker: play/skip/error bookkeeping on AudioFiles."""
    def setUp(self):
        config.init()
        self.p = NullPlayer()
        self.w = SongLibrary()
        # Two identical songs; ~#lastplayed is pre-seeded with 10 so the
        # tests can detect whether the tracker updated it.
        self.s1 = AudioFile(
            {"~#playcount": 0, "~#skipcount": 0, "~#lastplayed": 10,
             "~filename": "foo", "~#length": 1.5})
        self.s2 = AudioFile(
            {"~#playcount": 0, "~#skipcount": 0, "~#lastplayed": 10,
             "~filename": "foo", "~#length": 1.5})
        self.cm = SongTracker(self.w, self.p, self)
        self.current = None
    def do(self):
        # Drain pending GTK events so the tracker's signal handlers run.
        while Gtk.events_pending():
            Gtk.main_iteration()
    def test_destroy(self):
        self.cm.destroy()
    def test_play(self):
        import time
        # Allow at least 2 second to elapse to simulate playing
        self.p.song = self.s1
        self.p.paused = False
        time.sleep(2)
        self.do()
        self.p.emit('song-ended', self.s1, False)
        self.do()
        t = time.time()
        # A completed song bumps playcount and refreshes lastplayed.
        self.assertEquals(self.s1["~#playcount"], 1)
        self.assertEquals(self.s1["~#skipcount"], 0)
        self.failUnless(t - self.s1["~#lastplayed"] <= 1)
    def test_skip(self):
        # Ending a song with stopped=True counts as a skip, not a play.
        self.p.emit('song-ended', self.s1, True)
        self.do()
        self.assertEquals(self.s1["~#playcount"], 0)
        self.assertEquals(self.s1["~#skipcount"], 1)
        self.failUnless(self.s1["~#lastplayed"], 10)
    def test_error(self):
        # A player error must not touch any of the statistics.
        self.current = self.p.song = self.s1
        self.p._error('Test error')
        self.do()
        self.assertEquals(self.s1["~#playcount"], 0)
        self.assertEquals(self.s1["~#skipcount"], 0)
        self.failUnless(self.s1["~#lastplayed"], 10)
    def test_restart(self):
        # Restarting the current song counts neither as play nor as skip.
        self.current = self.s1
        self.p.emit('song-ended', self.s1, True)
        self.do()
        self.assertEquals(self.s1["~#playcount"], 0)
        self.assertEquals(self.s1["~#skipcount"], 0)
    def tearDown(self):
        self.w.destroy()
        config.quit()
class TFSInterface(TestCase):
    """Tests for FSInterface: mirrors the current song into a file on disk."""
    def setUp(self):
        self.p = NullPlayer()
        self.dir = mkdtemp()
        self.filename = os.path.join(self.dir, "foo")
        self.fs = FSInterface(self.filename, self.p)
    def tearDown(self):
        self.p.destroy()
        shutil.rmtree(self.dir)
    def do(self):
        # Drain pending GTK events so the interface's handlers run.
        while Gtk.events_pending():
            Gtk.main_iteration()
    def test_init(self):
        # Before any song starts, no file may exist.
        self.do()
        self.failIf(os.path.exists(self.filename))
    def test_start(self):
        # Starting a song writes its tags to the file.
        self.p.emit('song_started', AudioFile({"woo": "bar", "~#length": 10}))
        self.do()
        with open(self.filename, "rb") as h:
            self.failUnless(b"woo=bar\n" in h.read())
    def test_song_ended(self):
        # The file is removed again once the song ends.
        self.p.emit('song-started', AudioFile({"woo": "bar", "~#length": 10}))
        self.do()
        self.p.emit('song-ended', {}, False)
        self.do()
        self.failIf(os.path.exists(self.filename))
| gpl-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/email/__init__.py | 56 | 1766 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""A package for parsing, handling, and generating email messages."""
__all__ = [
'base64mime',
'charset',
'encoders',
'errors',
'feedparser',
'generator',
'header',
'iterators',
'message',
'message_from_file',
'message_from_binary_file',
'message_from_string',
'message_from_bytes',
'mime',
'parser',
'quoprimime',
'utils',
]
# Some convenience routines. Don't import Parser and Message as side-effects
# of importing email since those cascadingly import most of the rest of the
# email package.
def message_from_string(s, *args, **kws):
    """Parse the text string *s* into a Message object model.

    Any extra positional/keyword arguments (e.g. _class) are forwarded to
    the Parser constructor.
    """
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parsestr(s)
def message_from_bytes(s, *args, **kws):
    """Parse the bytes string *s* into a Message object model.

    Any extra positional/keyword arguments (e.g. _class) are forwarded to
    the BytesParser constructor.
    """
    from email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parsebytes(s)
def message_from_file(fp, *args, **kws):
    """Read the open text file *fp* and parse it into a Message object model.

    Any extra positional/keyword arguments (e.g. _class) are forwarded to
    the Parser constructor.
    """
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parse(fp)
def message_from_binary_file(fp, *args, **kws):
    """Read the open binary file *fp* and parse it into a Message object model.

    Any extra positional/keyword arguments (e.g. _class) are forwarded to
    the BytesParser constructor.
    """
    from email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parse(fp)
| gpl-3.0 |
NixaSoftware/CVis | venv/bin/tools/regression/xsl_reports/test/test.py | 30 | 1121 | import sys
sys.path.append( '..' )
import os
import boost_wide_report
import common
import utils
import shutil
import time
# Tag of the results tree this smoke test exercises.
tag = "CVS-HEAD"

# Start from a clean slate: drop merged results left over from a previous run.
if os.path.exists( "results/incoming/CVS-HEAD/processed/merged" ):
    shutil.rmtree( "results/incoming/CVS-HEAD/processed/merged" )

# Monkeypatch the FTP-download and archive-unpacking steps so the pipeline
# runs offline against local fixture data; both stubs just return 1
# (presumably the return value is ignored by execute_tasks -- TODO confirm).
boost_wide_report.ftp_task = lambda ftp_site, site_path, incoming_dir: 1
boost_wide_report.unzip_archives_task = lambda incoming_dir, processed_dir, unzip: 1

# Drive the full report-generation pipeline; outputs land under ./output.
boost_wide_report.execute_tasks(
      tag = tag
    , user = None
    , run_date = common.format_timestamp( time.gmtime() )
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = os.path.abspath( "results" )
    , output_dir = os.path.abspath( "output" )
    , reports = [ "i", "x", "ds", "dd", "dsr", "ddr", "us", "ud", "usr", "udr" ]
    , warnings = [ 'Warning text 1', 'Warning text 2' ]
    , extended_test_results = os.path.abspath( "output/extended_test_results.xml" )
    , dont_collect_logs = 1
    , expected_results_file = os.path.abspath( "expected_results.xml" )
    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
    )
| apache-2.0 |
harshilasu/GraphicMelon | y/google-cloud-sdk/lib/googlecloudsdk/core/util/resource_registration.py | 5 | 2073 | # Copyright 2013 Google Inc. All Rights Reserved.
"""One-line documentation for resource_registration module.
A detailed description of resource_registration.
"""
from googlecloudsdk.core import resources
def RegisterReleasedAPIs():
  """Register all official versions of released Cloud APIs.
  """
  # Imports are deferred into the function body so that merely loading this
  # module does not pull in every generated API client.
  # pylint:disable=g-import-not-at-top
  from googlecloudapis.bigquery import v2 as bigquery_v2
  from googlecloudapis.compute import v1 as compute_v1
  from googlecloudapis.developerprojects import v2beta1 as projects_v2beta1
  from googlecloudapis.dns import v1beta1 as dns_v1beta1
  from googlecloudapis.manager import v1beta2 as manager_v1beta2
  from googlecloudapis.replicapool import v1beta1 as replicapool_v1beta1
  from googlecloudapis.resourceviews import v1beta1 as resourceviews_v1beta1
  from googlecloudapis.sqladmin import v1beta3 as sqladmin_v1beta3
  # NOTE(review): get_credentials=False presumably skips the credential
  # lookup when constructing each client -- confirm against the generated
  # client's constructor.
  resources.RegisterAPI(bigquery_v2.BigqueryV2(get_credentials=False))
  resources.RegisterAPI(compute_v1.ComputeV1(get_credentials=False))
  resources.RegisterAPI(
      projects_v2beta1.DeveloperprojectsV2beta1(get_credentials=False))
  resources.RegisterAPI(dns_v1beta1.DnsV1beta1(get_credentials=False))
  resources.RegisterAPI(manager_v1beta2.ManagerV1beta2(get_credentials=False))
  resources.RegisterAPI(
      replicapool_v1beta1.ReplicapoolV1beta1(get_credentials=False))
  resources.RegisterAPI(
      resourceviews_v1beta1.ResourceviewsV1beta1(get_credentials=False))
  resources.RegisterAPI(sqladmin_v1beta3.SqladminV1beta3(get_credentials=False))
  # The APIs below are imported immediately before their registration rather
  # than in the import group above; the interleaved order is preserved.
  from googlecloudapis.autoscaler import v1beta2 as autoscaler_v1beta2
  resources.RegisterAPI(
      autoscaler_v1beta2.AutoscalerV1beta2(get_credentials=False))
  from googlecloudapis.replicapool import v1beta2 as replicapool_v1beta2
  resources.RegisterAPI(
      replicapool_v1beta2.ReplicapoolV1beta2(get_credentials=False))
  from googlecloudapis.replicapoolupdater import v1beta1 as updater_v1beta1
  resources.RegisterAPI(
      updater_v1beta1.ReplicapoolupdaterV1beta1(get_credentials=False))
def RegisterUnreleasedAPIs():
  """Placeholder: no unreleased Cloud APIs are registered at this time."""
  pass
| gpl-3.0 |
openstack/murano | murano/tests/unit/cmd/test_manage.py | 1 | 9360 | # Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from murano.cmd import manage
from murano.db.catalog import api as db_catalog_api
from murano.db import models
from murano.db import session as db_session
from murano.tests.unit import base as test_base
CONF = cfg.CONF
class TestManage(test_base.MuranoWithDBTestCase):
    """Unit tests for the murano-manage CLI commands in murano.cmd.manage."""

    def setUp(self):
        """Seed the test DB with an environment, categories, tags, a package
        and its class definitions, and build a mock "loaded package" object
        for the import tests.
        """
        super(TestManage, self).setUp()
        session = db_session.get_session()
        # Create environment.
        # NOTE(review): names/ids are bytes literals throughout --
        # presumably the model columns accept bytes; verify.
        self.test_environment = models.Environment(
            name=b'test_environment', tenant_id=b'test_tenant_id',
            version=1
        )
        # Create categories.
        self.test_categories = [
            models.Category(name=b'test_category_1'),
            models.Category(name=b'test_category_2')
        ]
        # Create tags.
        self.test_tags = [
            models.Tag(name=b'test_tag_1'),
            models.Tag(name=b'test_tag_2')
        ]
        # Add environment, categories and tags to DB.
        with session.begin():
            session.add(self.test_environment)
            session.add_all(self.test_categories)
            session.add_all(self.test_tags)
        # Create package.
        self.test_package = models.Package(
            fully_qualified_name=b'test_fqn', name=b'test_name',
            logo=b'test_logo', supplier_logo=b'test_supplier_logo',
            type=b'test_type', description=b'test_desc', is_public=True,
            archive=b'test_archive', ui_definition=b'test_ui_definition',
            categories=self.test_categories, tags=self.test_tags,
            owner_id=self.test_environment.tenant_id,)
        # Add the package to the DB.
        # The package must be committed first so test_package.id exists for
        # the class definitions' foreign keys below.
        with session.begin():
            session.add(self.test_package)
        # Create class definitions and assign their FKs to test_package.id.
        self.test_class_definitions = [
            models.Class(name=b'test_class_definition_1',
                         package_id=self.test_package.id),
            models.Class(name=b'test_class_definition_2',
                         package_id=self.test_package.id)
        ]
        # Add the class definitions to the DB and update the FK reference for
        # test_package.class_definitions.
        with session.begin():
            session.add_all(self.test_class_definitions)
            self.test_package.class_definitions = self.test_class_definitions
            session.add(self.test_package)
        # Create mock object that resembles loaded package from
        # load_utils.load_from_dir
        self.mock_loaded_package = mock.MagicMock(
            full_name=self.test_package.fully_qualified_name,
            display_name=self.test_package.name,
            package_type=self.test_package.type,
            author=self.test_package.author,
            supplier=self.test_package.supplier,
            description=self.test_package.description,
            tags=[tag.name for tag in self.test_package.tags],
            classes=[cls.name for cls in self.test_package.class_definitions],
            logo=self.test_package.logo,
            supplier_logo=self.test_package.supplier_logo,
            ui=self.test_package.ui_definition,
            blob=self.test_package.archive)

    @mock.patch('murano.cmd.manage.LOG')
    @mock.patch('murano.cmd.manage.load_utils')
    def test_do_import_package(self, mock_load_utils, mock_log):
        """Importing with --update set should insert a new package row."""
        manage.CONF = mock.MagicMock()
        manage.CONF.command = mock.MagicMock(
            directory='test_dir',
            categories=[cat.name for cat in self.test_package.categories],
            update=True)
        mock_load_utils.load_from_dir.return_value = self.mock_loaded_package
        manage.do_import_package()
        # Assert that the function ran to completion.
        self.assertIn("Finished import of package",
                      str(mock_log.info.mock_calls[0]))
        # Check that the package was uploaded to the DB.
        filter_params = {
            'name': self.test_package.name,
            'fully_qualified_name': self.test_package.fully_qualified_name,
            'type': self.test_package.type,
            'description': self.test_package.description
        }
        retrieved_package = None
        session = db_session.get_session()
        with session.begin():
            retrieved_package = session.query(models.Package)\
                .filter_by(**filter_params).first()
        self.assertIsNotNone(retrieved_package)
        # A different id proves a new row was created rather than the
        # fixture package being updated in place.
        self.assertNotEqual(self.test_package.id, retrieved_package.id)

    @mock.patch('murano.cmd.manage.LOG')
    @mock.patch('murano.cmd.manage.load_utils')
    @mock.patch('murano.cmd.manage.db_catalog_api')
    def test_do_import_package_without_update(self, mock_db_catalog_api,
                                              mock_load_utils, mock_log):
        """Importing an existing package without --update must log an error."""
        mock_db_catalog_api.package_search.return_value =\
            [self.test_package]
        mock_load_utils.load_from_dir.return_value =\
            mock.MagicMock(full_name='test_full_name')
        manage.CONF = mock.MagicMock()
        manage.CONF.command = mock.MagicMock(
            directory='test_dir',
            categories=[],
            update=False)
        manage.do_import_package()
        mock_log.error.assert_called_once_with(
            "Package '{name}' exists ({pkg_id}). Use --update."
            .format(name='test_full_name', pkg_id=self.test_package.id))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_do_list_categories(self, mock_stdout):
        """category-list should print the two categories seeded in setUp."""
        # Newlines and bytes-literal quoting artifacts (b'...') are stripped
        # from captured stdout before comparison.
        expected_output = ">> Murano package categories:* "\
                          "test_category_1* test_category_2"
        manage.do_list_categories()
        self.assertEqual(expected_output,
                         mock_stdout.getvalue().replace('\n', '')
                         .replace('b\'', '').replace('\'', ''))

    @mock.patch('murano.cmd.manage.db_catalog_api')
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_do_list_categories_with_no_categories(self, mock_stdout,
                                                   mock_db_catalog_api):
        """category-list with an empty catalog prints a "not found" notice."""
        mock_db_catalog_api.category_get_names.return_value = []
        expected_output = "No categories were found"
        manage.do_list_categories()
        self.assertEqual(
            expected_output, mock_stdout.getvalue().replace('\n', ''))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_do_add_category(self, mock_stdout):
        """category-add prints a success message for a new category."""
        manage.CONF = mock.MagicMock()
        manage.CONF.command.category_name = 'test_category_name'
        expected_output = ">> Successfully added category test_category_name"
        manage.do_add_category()
        self.assertEqual(expected_output,
                         mock_stdout.getvalue().replace('\n', ''))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_do_add_category_except_duplicate_error(self, mock_stdout):
        """category-add on a name that already exists prints an error."""
        manage.CONF = mock.MagicMock()
        manage.CONF.command.category_name = 'test_category_name'
        expected_output = ">> ERROR: Category \'test_category_name\' already "\
                          "exists"
        # Pre-create the category so the command hits the duplicate path.
        db_catalog_api.category_add('test_category_name')
        manage.do_add_category()
        self.assertEqual(expected_output,
                         mock_stdout.getvalue().replace('\n', ''))

    def test_add_command_parsers(self):
        """All three subcommands are registered with their handler funcs and
        four arguments in total.
        """
        mock_parser = mock.MagicMock()
        mock_subparsers = mock.MagicMock()
        mock_subparsers.add_parser.return_value = mock_parser
        manage.add_command_parsers(mock_subparsers)
        mock_subparsers.add_parser.assert_any_call('import-package')
        mock_subparsers.add_parser.assert_any_call('category-list')
        mock_subparsers.add_parser.assert_any_call('category-add')
        mock_parser.set_defaults.assert_any_call(func=manage.do_import_package)
        mock_parser.set_defaults.assert_any_call(
            func=manage.do_list_categories)
        mock_parser.set_defaults.assert_any_call(func=manage.do_add_category)
        self.assertEqual(4, mock_parser.add_argument.call_count)

    @mock.patch('murano.cmd.manage.CONF')
    def test_main_except_runtime_error(self, mock_conf):
        """main() converts a RuntimeError from CONF into a SystemExit whose
        message starts with 'ERROR:'.
        """
        mock_conf.side_effect = RuntimeError
        with self.assertRaisesRegex(SystemExit, 'ERROR:'):
            manage.main()

    @mock.patch('murano.cmd.manage.CONF')
    def test_main_except_general_exception(self, mock_conf):
        """main() converts any command failure into a SystemExit with the
        'murano-manage command failed:' message.
        """
        mock_conf.command.func.side_effect = Exception
        expected_err_msg = "murano-manage command failed:"
        with self.assertRaisesRegex(SystemExit, expected_err_msg):
            manage.main()
| apache-2.0 |
stachenov/PyLeetCode | tests/test_trapping_rain_2.py | 1 | 75081 | import pytest
from problems.trapping_rain_2 import Solution
@pytest.mark.parametrize("heightMap, expected", [
([], 0),
([[2, 2, 2],
[2, 1, 2],
[2, 2, 2]
], 1),
([
[1, 4, 3, 1, 3, 2],
[3, 2, 1, 3, 2, 4],
[2, 3, 3, 2, 3, 1]
], 4),
([
[5, 5, 5, 5, 5],
[5, 2, 5, 2, 5],
[5, 5, 5, 5, 5],
], 6),
([
[5, 5, 5, 5, 5],
[5, 2, 3, 2, 5],
[5, 5, 5, 5, 5],
], 8),
([
[5, 5, 15, 15, 15],
[5, 2, 15, 2, 15],
[5, 5, 15, 15, 15],
], 16),
([
[6, 6, 15, 15, 15],
[6, 2, 5, 1, 15],
[6, 6, 15, 3, 15],
], 5),
([[1103,1106,1107,1105,1103,1105,1106,1102,1109,1101,1102,1107,1100,1109,1103,1106,1100,1106,1102,1106,1101,1108,1107,1109,1102,1100,1102,1103,1107,1105,1109,1102,1102,1108,1109,1107,1103,1106,1101,1102,1109,1103,1101,1109,1104,1107,1108,1104,1105,1100],[1103,536,101,990,966,883,872,180,1006,291,315,935,94,337,346,515,856,739,323,867,134,905,592,555,824,377,444,374,53,760,97,818,286,188,798,594,413,661,764,409,942,70,686,378,749,22,236,596,104,549],[1105,580,444,388,477,611,107,912,327,502,662,766,311,290,296,451,875,699,454,629,450,739,41,127,107,781,491,685,719,937,577,866,507,363,596,975,316,693,229,634,538,881,742,839,513,29,280,378,718,725],[1100,159,806,733,628,255,856,461,931,565,389,498,774,238,851,360,203,510,44,774,134,924,997,866,753,501,237,375,869,946,442,561,447,238,285,417,484,131,868,405,39,247,245,803,828,438,153,21,938,539],[1106,414,453,773,623,548,616,850,914,828,138,698,379,927,927,1006,334,753,480,193,500,509,782,735,654,600,515,149,964,796,679,92,552,474,207,517,365,814,358,621,632,838,309,353,756,578,350,432,321,820],[1105,811,671,740,888,315,330,746,454,636,532,475,718,426,292,268,934,647,72,634,610,46,462,909,389,560,478,81,983,141,891,940,943,904,670,173,209,991,909,1006,969,783,823,678,200,105,936,476,94,350],[1100,694,386,552,946,117,455,766,189,428,897,422,358,182,669,19,346,220,352,597,216,311,723,382,331,265,829,609,731,914,949,821,950,677,715,238,137,160,994,668,930,234,432,279,406,91,640,94,302,982],[1102,860,635,395,232,309,650,52,908,723,308,200,534,600,219,591,829,346,742,165,1004,14,389,779,283,786,860,265,870,152,589,894,1003,215,631,577,514,623,971,764,336,269,954,212,212,516,794,31,852,878],[1108,199,882,918,968,508,46,818,763,258,313,343,143,658,900,764,577,756,378,539,510,56,798,807,259,1000,313,43,373,507,263,902,696,135,162,1006,985,198,167,739,446,470,424,931,470,314,38,37,60,758],[1106,912,804,707,709,53,49,12,438,413,510,691,657,548,169,161,545,144,349,702,225,137,514,639,59,974,295,439,353,345,187,910,248,981,959,299,
377,998,302,805,753,154,839,400,692,350,551,579,836,242],[1101,52,370,127,33,771,91,319,200,435,1006,377,687,244,700,636,534,67,624,178,215,368,322,396,110,356,736,1004,926,562,588,539,956,300,657,980,61,90,641,603,867,637,322,896,224,365,522,100,422,489],[1100,979,199,284,365,651,630,443,997,898,348,576,780,294,866,427,616,270,859,247,215,69,227,528,955,793,883,468,883,647,299,493,617,488,767,324,481,739,110,469,628,448,35,398,84,243,167,691,503,368],[1100,709,427,849,579,373,632,804,183,857,441,472,692,400,302,801,67,125,531,167,584,501,957,961,241,31,547,750,64,40,108,335,91,526,526,12,241,149,806,414,348,590,228,31,980,872,822,389,987,695],[1106,914,186,493,217,769,867,754,509,921,137,960,246,570,828,115,573,59,254,721,815,944,301,385,965,624,599,778,1003,928,815,892,832,992,727,40,103,584,136,603,496,263,553,84,824,723,189,387,772,785],[1108,929,720,742,304,27,356,245,147,701,163,953,583,338,935,301,720,28,227,846,973,65,100,868,140,914,581,671,643,695,799,83,614,861,815,260,878,513,495,16,205,649,959,130,977,236,773,687,606,991],[1105,570,46,965,780,528,221,352,542,206,389,331,280,994,182,437,244,50,293,82,408,840,73,357,960,40,583,724,69,532,57,934,92,445,242,214,964,453,908,496,650,288,169,272,272,693,51,858,733,334],[1102,132,164,345,831,467,375,757,181,786,279,228,711,713,663,943,917,969,738,816,807,730,94,318,344,708,1001,386,908,725,62,181,199,569,516,20,26,234,119,549,10,388,119,63,91,124,348,999,436,77],[1107,233,797,241,542,132,291,885,860,189,600,264,360,141,823,867,504,191,91,613,730,443,992,191,497,425,306,835,414,732,902,561,307,42,144,191,516,425,67,718,605,1009,972,307,493,786,164,987,319,597],[1102,392,31,276,573,870,692,221,695,96,295,940,1000,593,324,486,126,830,902,535,538,849,535,500,146,370,628,653,347,938,592,631,320,965,898,235,825,580,447,863,18,732,793,360,667,107,837,136,279,81],[1101,159,920,538,649,408,898,620,403,587,900,986,209,562,941,97,787,109,667,576,962,27,651,745,378,308,194,205,786,815,276,438,964,538,318,603,288,207,565,68
2,784,455,10,335,1007,293,422,137,392,431],[1103,344,449,344,431,169,995,967,364,771,772,982,551,726,862,860,672,492,409,227,164,183,25,516,861,374,800,273,501,182,47,547,869,838,881,290,997,866,600,351,980,362,675,521,79,527,371,93,361,122],[1100,516,648,677,374,499,42,164,114,885,689,151,422,548,979,646,180,966,854,770,659,824,475,324,336,896,193,49,979,545,162,631,403,800,299,119,641,683,274,745,558,305,887,323,843,208,959,365,165,803],[1108,166,970,943,833,296,181,368,687,150,255,191,771,1000,333,60,110,964,85,374,52,634,669,929,299,854,479,248,561,986,393,29,143,353,314,966,991,485,676,21,977,922,202,739,912,878,141,12,184,217],[1108,226,193,387,497,482,583,967,72,135,943,807,506,428,151,163,736,484,990,403,495,958,315,40,39,569,908,170,572,434,729,290,651,912,20,490,736,593,799,150,718,733,948,567,503,441,720,230,915,700],[1103,401,648,280,431,677,839,681,190,753,105,909,34,98,164,396,579,242,979,720,383,40,443,673,597,289,104,659,509,361,349,474,752,340,96,525,359,925,196,891,21,644,143,397,732,297,783,653,529,752],[1104,254,134,149,269,73,428,363,722,279,715,414,743,809,744,829,325,445,97,863,679,460,497,812,847,572,99,620,215,970,714,921,567,839,413,826,902,831,532,615,453,589,371,538,388,457,710,55,892,797],[1109,561,599,396,363,436,958,804,46,516,117,102,427,674,931,830,490,176,1004,364,133,447,943,494,327,322,941,27,719,175,166,618,79,755,1005,432,181,305,579,569,811,686,662,581,350,935,753,182,101,99],[1107,576,888,822,60,206,134,343,223,196,509,380,804,578,125,151,352,649,447,273,208,600,949,212,523,641,138,267,814,581,356,693,148,235,505,550,431,982,236,644,168,735,366,962,655,482,456,349,121,893],[1103,671,835,552,226,349,184,354,606,340,277,304,23,767,529,870,660,302,842,886,289,1000,963,645,305,608,117,751,947,580,986,550,594,811,93,810,502,619,506,450,949,773,745,314,883,616,174,533,261,359],[1101,540,349,714,175,996,312,635,89,601,557,417,494,141,571,929,941,63,538,437,504,829,553,591,133,778,197,649,653,448,998,404,330,690,108,496,28,762,473,108
,705,20,515,189,152,76,108,435,482,988],[1103,976,807,758,557,282,526,96,922,169,887,910,563,207,942,13,45,961,117,508,59,164,871,916,344,13,335,794,438,807,773,643,125,570,391,24,195,907,110,107,418,339,359,323,889,644,326,924,595,785],[1105,996,940,636,902,626,639,579,762,419,376,525,405,843,438,786,857,623,36,310,72,796,639,773,110,518,407,426,785,992,554,550,330,836,528,575,804,509,144,556,918,863,72,313,696,852,442,544,817,820],[1104,879,606,825,994,706,334,392,475,461,726,371,353,47,197,871,612,991,370,98,889,630,951,303,934,638,145,718,172,952,880,1006,173,476,821,510,525,497,244,342,300,960,703,643,349,890,504,303,223,864],[1102,454,485,333,748,761,961,883,821,475,178,691,823,693,509,987,545,24,474,779,356,117,82,401,750,421,633,597,67,846,803,449,291,630,124,381,381,428,606,544,893,774,577,707,810,77,684,345,443,500],[1107,142,959,539,533,700,302,157,639,359,345,432,150,978,53,265,349,776,35,946,663,270,62,230,967,214,297,993,550,731,836,1007,215,137,888,738,179,180,237,808,530,573,231,670,893,626,277,233,392,302],[1101,45,563,573,618,872,778,905,208,670,978,386,19,183,513,897,264,683,67,491,833,939,406,54,952,290,22,219,865,757,864,376,144,769,291,752,983,411,648,181,423,968,909,432,494,765,671,100,790,81],[1103,613,10,330,10,952,962,22,514,817,769,368,535,904,127,168,646,100,570,636,624,983,947,875,758,431,630,419,873,410,842,796,14,843,468,366,137,420,378,641,579,138,351,456,384,468,615,20,911,175],[1109,877,500,936,742,248,709,715,10,572,467,842,358,471,27,817,179,507,579,548,138,149,28,480,595,402,290,552,764,543,717,753,410,560,31,495,798,730,200,150,644,657,335,993,471,704,152,640,201,73],[1100,330,564,548,152,502,940,432,44,695,318,104,790,718,654,812,555,794,532,97,935,167,745,612,502,558,306,996,540,850,59,61,522,966,599,664,458,882,438,492,567,98,586,347,807,230,149,704,15,24],[1102,292,533,879,246,25,427,894,363,309,734,764,360,246,720,302,252,168,174,33,651,731,121,579,420,270,800,912,965,157,926,99,791,449,968,27,816,385,911,521,684,988,275,38
7,576,986,679,171,144,843],[1106,137,916,1009,707,326,270,849,580,577,996,496,18,777,287,976,146,445,703,47,956,729,377,222,106,944,550,127,105,684,960,641,812,218,640,861,535,252,700,457,171,686,944,179,805,573,145,941,361,190],[1100,307,910,698,871,1006,984,411,124,79,438,426,62,592,635,692,443,512,287,133,959,800,161,245,970,956,809,457,239,512,638,559,809,538,599,23,886,573,776,1000,994,204,769,46,786,394,81,219,248,710],[1104,549,500,845,785,460,791,936,260,372,438,888,274,589,768,863,954,644,779,721,987,115,267,746,152,44,482,575,605,720,275,642,259,117,477,386,568,611,312,170,973,92,48,237,24,806,443,968,440,564],[1109,417,669,937,505,811,323,977,728,270,39,345,902,641,453,722,17,363,323,672,523,638,106,561,866,120,709,651,79,491,205,100,899,864,379,746,18,692,714,736,305,743,424,197,374,867,261,734,220,574],[1108,733,203,844,636,411,955,335,404,376,816,599,466,57,805,836,794,813,870,850,892,165,583,658,705,300,515,956,376,77,873,114,800,418,300,778,171,245,103,565,611,261,154,420,661,301,598,445,457,458],[1105,691,966,210,339,661,852,844,959,570,911,174,674,53,582,965,821,743,552,266,650,506,869,146,268,520,438,856,307,885,304,934,566,260,135,895,263,329,81,565,890,334,729,906,377,654,213,540,739,756],[1106,380,604,655,868,862,518,296,708,815,523,354,740,431,957,217,668,210,888,739,117,768,63,189,17,782,185,220,312,914,318,450,636,912,96,495,116,956,133,814,761,647,511,843,420,458,402,79,10,281],[1100,118,391,566,297,398,338,472,961,993,728,269,433,355,524,871,192,982,817,667,139,921,304,640,754,67,88,147,136,88,770,638,196,151,194,835,892,875,649,843,858,368,454,633,65,320,495,599,293,654],[1106,422,565,903,52,310,960,130,799,438,560,559,66,747,52,251,924,934,468,564,119,668,274,564,291,329,226,128,270,509,773,516,273,328,409,315,980,711,787,121,139,338,22,196,427,65,789,693,989,599],[1107,99,257,863,1005,890,534,221,1009,794,721,124,653,336,794,52,642,117,106,771,228,235,451,241,773,220,296,904,904,627,845,493,68,92,347,63,325,223,627,324,1008,690,790,651,
16,574,45,648,33,141]], 353397),
([[4146,3361,1062,2376,4447,242,4925,3773,3558,1790,2984,4137,2889,38,1077,3621,3112,4922,1269,1215,4418,878,2938,4723,2085,3070,1263,1908,4642,949,3404,2345,66,3807,1292,20,2061,2205,3968,3338,4905,733,3367,829,1338,2465,1369,1312,4703,4517,2973,4231,808,4397,2412,3650,1921,4829,3724,2781,1763,2403,4565,2938,3191,2723,823,2485,1446,4029,3494,3646,3611,698,4312,2777,127,4608,894,44,4198,4376,1609,3350,1015,3183,2923,4697,2235,2562,303,1404,4582,837,4025,4472,4637,3107,3631,3841,2078,2436,601,3614,3705,7,2640,3437,3669],[658,19742,17542,16427,1868,16524,19847,17983,4262,18055,15884,15410,1682,15212,15551,15063,1715,16758,15547,18225,4495,17202,16513,18157,2483,18930,18527,15303,683,15158,18926,16010,3970,16738,17734,19621,2047,16035,19106,16611,2667,15792,18506,16836,175,17389,18007,15392,3884,15289,17199,18032,2637,15380,19049,17226,1020,16931,17283,15333,4048,15070,19689,17846,3674,15359,15732,18118,3075,18812,18950,17144,3343,18868,17502,16271,2299,19454,17965,18193,2961,15748,17799,18378,3456,18515,15879,19708,4982,19919,17307,15802,825,16309,17287,15860,658,19192,15565,18534,822,15804,18340,19726,2267,15027,15954,18810,2321],[3856,17062,4741,16111,2114,18709,3153,19876,3595,19577,416,18902,4809,17931,3045,15697,3412,17433,2586,18257,1796,18730,3236,19759,421,15519,1508,18758,3448,16296,4730,18863,4817,18442,4459,15199,3513,16580,2048,15299,3496,15542,3397,17855,2741,19088,1435,18539,3368,17053,4835,18447,1961,17926,2564,16604,2518,17273,26,19644,2385,19462,22,19616,504,17543,1077,19144,2247,15567,1440,19639,3319,19558,1897,17071,4758,19467,4351,18346,931,19737,2080,18001,1886,18949,2930,17347,2162,16317,3406,16964,4768,16691,2496,18627,3489,17917,3975,18519,3871,16652,1680,16220,3836,18636,2765,15180,2566],[4950,18078,1600,18461,2086,15139,2082,17143,1897,17387,4019,16211,368,15625,3689,16195,3385,15153,3899,19906,1240,19166,2853,17127,3230,16716,4833,16607,475,17794,4773,16236,1810,15154,3456,17942,2752,16755,1030,17337,4491,18552,3465,19310,1302,16364,3828,160
24,1346,15244,4851,19180,3654,16850,2672,19394,1950,18646,2189,15963,2967,15418,2988,18888,4955,19772,3996,17012,3777,16992,3447,16667,287,19248,1955,18061,1641,15982,3638,15506,107,17275,2831,19681,3453,19895,722,15212,1458,17142,3345,15923,3194,18341,3823,18294,4540,18723,4260,19614,1659,18112,1843,19566,3194,18248,1399,19377,4121],[4372,16046,1235,15567,4847,19484,2869,18707,3979,17957,880,17621,4324,18097,1085,19144,4455,18289,4559,16024,547,16486,2942,17444,2761,18205,2869,18045,1182,18238,1026,17644,3558,16891,2762,17027,4922,19642,1356,19275,4996,15968,2136,17813,786,16497,4097,15137,2428,15144,178,19883,130,18674,805,16747,2353,15568,3631,18216,4943,15279,4138,15402,1522,19421,1734,16621,683,15277,3454,17918,2393,15953,4859,17526,2297,19692,4665,19400,4580,15512,2473,18449,1627,19636,3276,19868,4968,18541,1923,18468,2720,18987,3821,19213,1778,15572,1805,18928,2115,15347,4866,18098,4549,17019,1514,18031,1801],[270,15863,2797,15301,1835,19203,2558,17367,3526,19358,3628,16468,3222,17502,2790,19077,27,18632,2806,17482,1995,16225,3243,16371,2052,16865,1748,19506,4980,18392,3129,19803,4881,15798,4621,18367,1003,19641,4360,16336,3104,17886,2057,18066,1480,17724,4976,15497,4526,17129,3754,16773,2073,19935,945,15890,3369,18275,812,19194,290,19711,1255,15366,3568,16877,2943,18311,4359,18151,373,18551,1498,16844,2629,15871,438,17562,4499,18770,205,17794,1181,15939,4939,16895,54,16586,2588,17333,3268,15035,939,17296,3336,16450,2267,17820,2368,16804,1206,18896,2545,15665,1045,15139,2434,16423,4664],[4418,17562,3060,15037,2320,16809,3027,16571,4103,18635,1387,16778,2485,16502,1576,18491,520,17659,1994,16035,4223,15990,3361,15952,4158,16341,3560,19572,3560,17327,2719,15402,3635,17186,1536,16221,4540,19153,3605,17152,622,19255,1027,15632,1986,16156,2596,16572,3450,19350,34,17947,1894,17912,1010,15241,3602,18841,2667,18647,2399,17341,4613,19513,4331,18951,3911,17709,1602,18747,522,18081,262,19337,2039,15764,1913,18388,2605,17423,3489,17371,3995,16192,4853,19959,1636,16372,95
6,18157,1518,16155,4454,18507,2334,17058,853,17319,1652,15152,288,18467,4291,15756,4000,17577,4583,15885,4718],[1841,18065,4467,17535,2303,17787,3688,17624,1956,19161,2822,19810,1144,15265,1583,15888,1940,19962,3881,18758,4763,17194,4401,15399,906,18844,2219,17235,2184,16823,4800,17788,2612,19374,1900,16025,3230,18780,2171,16680,2470,16520,606,19162,4268,17046,1193,15480,3768,17243,1981,18252,1897,19538,1343,18148,1408,15695,4387,16215,640,17635,72,16396,2303,16436,4252,15484,2699,16018,1460,18295,3489,17094,1987,17495,2597,15511,2648,16365,4212,15158,3452,19819,299,18158,4296,18915,3140,17788,4230,19436,1478,17205,3820,16130,4033,17293,4952,19805,4552,19133,2160,17631,2379,16053,3006,16979,1512],[4017,15158,445,19311,4835,17203,468,18379,4323,15430,203,15482,4310,16132,558,15067,4671,19938,1765,15537,3792,15791,1955,19080,2402,16420,92,16241,1147,16141,3807,17293,4574,18331,295,15059,229,16424,2208,17848,4761,15107,3347,17065,1796,19344,3662,17552,3368,17054,4140,18178,1194,18379,1803,19700,3238,18822,2835,16802,2943,15159,1906,15954,4665,17367,2518,18736,2521,17718,1388,16010,1581,18230,4190,18820,4437,17090,3455,16223,1054,18299,1473,16446,3041,16506,2907,16758,682,17071,3817,16250,1455,15044,3501,16306,1214,16774,2216,19485,2222,19373,3827,18026,2104,15867,1738,16624,456],[3008,16445,700,18461,3025,15782,1939,19223,1071,19241,2039,15476,731,19984,163,17196,4882,18668,560,18449,1426,15620,1194,15505,1472,19303,1650,16756,2240,17519,213,19079,2679,17209,3646,17418,4336,17871,756,15337,54,16635,1051,16893,1419,16226,933,15834,733,17722,832,16883,35,16895,4418,15797,351,19190,3905,19982,1771,16137,177,19854,3014,17703,1100,18146,366,15398,1656,16674,3103,17573,2726,18337,1462,18292,3241,18257,1896,18887,4846,18434,4683,15303,2564,15041,3693,18625,866,17008,267,19470,943,17679,1091,15424,3302,15195,1885,15328,293,16075,320,18115,3876,15364,4868],[2087,17448,2415,19927,1119,17632,4576,16659,2817,16414,390,16495,4983,17967,4494,15385,4229,19023,1154,15305,436,16121,442
8,18715,471,15394,101,18262,218,18929,4583,18461,372,16044,2769,16287,4457,19411,1703,18255,3380,17747,1047,18551,3643,19742,3937,19115,1435,18886,4429,18463,3534,15373,2722,17111,735,19658,21,18269,3519,16146,2704,15812,448,15680,2246,17041,939,16604,4536,18653,1786,16575,1767,18300,1599,17253,560,15268,1883,18521,4528,16996,317,15637,4929,19114,1195,19172,4953,17408,4976,17133,3958,16758,858,19610,4710,16604,4758,16671,3809,16968,1834,16314,1987,16420,952],[1323,15762,215,15448,3568,17790,4627,18087,1163,18769,4183,17912,2169,16221,3592,18751,750,17716,1445,18028,1344,17432,2646,16638,4347,19544,3468,16216,2253,15163,2541,19058,4695,15646,1699,18626,4002,16921,1898,18301,45,15569,167,19789,835,19683,2586,15527,3113,15804,1834,16447,2196,17985,673,18089,2707,18118,2919,19299,4429,16257,3985,15922,1701,17426,1574,15681,3887,16763,2628,18001,3502,17576,4413,19557,3226,17964,4500,16191,2303,18994,1947,18059,4623,18162,2767,18223,3055,17565,2914,17567,4281,15453,4923,15757,3117,17873,2278,17645,3631,19077,2822,16068,2038,18458,2891,16394,563],[3235,17502,3738,19063,2649,17133,1023,18185,4547,17038,4204,17375,1096,16031,4542,17832,2170,19540,1715,18561,3701,19402,2116,18828,1822,18567,4044,19192,2352,18933,2423,16415,68,19321,481,19694,2798,16719,3995,19701,1894,18548,1821,15938,809,17935,2281,15751,2004,16153,3465,16289,4677,17266,79,16295,2876,19604,2003,17921,2431,19070,960,18372,3773,18293,4402,18233,1801,15773,1337,19830,3,19860,3288,18097,2909,18050,3826,18920,1970,19117,480,17310,547,16871,3457,16120,2960,18886,1066,18139,1664,15471,4442,17945,3705,17946,4540,18744,1875,18281,4782,17246,2929,19422,86,15414,2417],[2266,19380,2472,16851,2333,19701,2836,17511,3919,18511,4622,17318,4868,16485,412,16224,554,17344,3892,15733,3751,16304,3798,16854,221,18668,2997,16379,298,15596,4939,18823,1325,17950,3182,16711,3071,16071,1649,17292,4251,15479,431,17680,4974,19004,725,16757,3534,15179,3531,18977,2062,17622,180,15266,2601,15252,2786,15482,2284,19487,577,17938,273,16538,31
60,16271,4507,17882,3879,16777,4406,16624,1931,15374,1049,15878,2825,15132,1902,18649,2433,16053,728,16680,150,16738,648,19499,1433,19184,4175,18715,2560,15729,994,15703,3525,16919,11,19930,2129,17438,2466,18132,1895,18909,244],[3110,17410,3648,17295,1145,15862,2775,15951,522,18227,489,16514,4430,17194,2538,17087,2046,18740,4900,17995,3001,18655,2442,17293,695,16628,3233,18124,1095,17728,4016,18469,4340,18942,2366,18161,3709,15678,1480,18538,4222,15007,4004,16794,1378,19832,4311,17502,3679,16738,1480,19075,3155,15541,835,16936,4927,17184,780,18185,4122,18029,2170,17543,1678,16351,1160,17090,2154,15517,1787,18806,2452,15910,364,16230,1244,18194,3264,17136,1791,19964,550,18706,4907,19135,2988,18681,3851,17197,2029,16860,3290,19963,3751,18510,4505,17362,1089,15292,3928,16404,840,18821,3783,19414,1288,16583,1167],[3570,16701,2975,15090,2058,17320,4848,16527,2784,15821,3928,18161,898,17119,4047,18451,4839,18681,1443,17864,1561,15243,607,17400,4195,18779,4243,15309,1605,16531,3118,17512,1848,15711,1171,19898,3198,15716,3709,15585,2108,17004,554,16008,4778,16285,1039,17058,4112,16183,3292,19995,3170,16529,3091,18890,2565,17787,3031,19448,2558,15624,2125,17036,1291,15604,1959,15953,4701,18316,3632,18599,1195,15524,3255,18437,109,16930,249,17350,3281,16749,92,18296,4710,18484,4525,16665,3068,15279,267,17918,3134,15820,3105,19742,4959,15707,4209,15289,4853,18207,4374,15564,4018,15878,2224,19418,3486],[4561,17583,515,19884,953,19899,462,16579,1204,19561,749,19565,1914,18398,3350,15355,1983,19446,2713,15905,2255,19841,555,19286,3055,16319,3098,16576,3807,19395,4781,16741,2769,17958,2729,16355,1059,18240,1183,16348,1068,15915,3369,15451,3162,18207,243,15551,2242,18693,730,16995,621,16285,1057,15165,3286,16640,1740,17770,4513,19755,300,17571,1501,18664,2669,16697,1830,15632,4375,17228,65,16816,4879,17427,2679,15818,4622,16094,1203,18118,1793,17684,4201,16385,4462,15243,1465,19533,3143,18166,1117,19448,1169,15271,158,15407,2482,18397,1356,16730,1130,16732,2884,19100,23,18771,376],
[1292,15033,2221,15908,3559,15397,2567,19570,4620,16256,2612,19117,1498,16182,3670,16801,2132,19058,1796,17225,432,18520,3883,16926,4360,15522,3323,16095,1883,17470,3718,15438,3166,15229,3059,18788,149,19657,4592,19915,3132,19727,1586,18795,1626,15262,913,15951,1615,17542,569,17280,907,18070,622,15348,4531,19797,4968,18055,4014,15075,3960,16312,2225,18833,2036,15539,4942,16910,3877,18475,2318,15807,1644,15937,1562,15910,2104,15965,1910,19002,1573,19164,3171,16311,3494,18778,4377,15423,4688,16503,3474,17874,4865,15052,3516,15243,30,18240,2654,18086,3860,19898,549,19233,2873,15631,4519],[4916,18850,2719,17641,870,18701,2359,15437,694,18787,4675,16328,2447,16301,2321,15527,1403,17581,3669,17315,4961,19616,4308,17066,868,15608,2636,19560,3113,15353,2537,18662,238,16783,4255,15127,1699,18377,1574,18635,1845,17570,196,16465,4408,18739,2780,16323,4208,18190,4914,16276,1948,15441,2839,19703,4560,16309,60,17742,1037,16856,758,18272,368,18347,4607,17294,2834,15853,3646,19024,3963,19320,2880,19794,3636,18581,2853,17757,2338,15066,1034,16866,506,16904,1814,17712,4192,19041,1885,16226,422,17559,2809,16249,4974,18559,4645,18438,1933,15935,1631,16294,1331,15748,1608,18387,4567],[1090,15847,3068,19621,1723,19360,3220,15747,4766,17990,2512,18088,3494,18076,302,15770,1089,17712,576,15237,4705,18566,424,19469,4357,15323,1327,16129,1612,16314,4007,17313,1603,18019,2337,15403,2508,19228,3459,16633,2639,19359,3403,18587,546,18393,2273,16688,3926,15766,391,16851,347,19077,1246,18341,4027,15127,3062,18638,2656,18860,266,16756,4696,18238,163,17244,3877,19964,69,16169,1323,19201,2166,18302,352,16697,4756,17890,2250,18239,1833,17932,3021,17461,3463,16330,2883,18453,3252,16815,3742,18561,4109,19995,4952,18358,1963,16059,4201,18677,2063,16781,3707,17089,1806,19032,290],[4092,16491,3720,19588,2196,18770,566,17402,707,18852,459,19962,1153,17848,453,18906,3634,15994,2368,17556,2685,15617,3043,19917,3599,15727,29,16084,889,16340,645,18025,4670,16398,1138,15198,664,17146,3767,19923,174,18897,4,19462
,4958,16471,1322,15409,3956,16437,1521,17790,480,18458,4929,18792,3902,15894,3661,16451,3400,19427,1288,18316,566,16025,4103,16490,3457,19000,3118,17118,2736,16631,3554,18625,2604,17518,2160,16818,2393,16478,80,18942,174,16761,1687,18621,530,16455,2236,18014,964,19082,438,16204,1287,18637,3517,16201,4900,18683,552,17018,4182,15980,1884,15028,2462],[2512,18114,4017,15140,4156,19216,1513,15018,3868,18249,1945,16723,607,15752,4355,17652,2123,16445,3926,17057,2874,16751,3315,18305,1179,18655,4831,16816,3404,18917,2545,18408,2050,17131,1953,16566,268,17189,3037,19816,1301,19584,3432,18588,3676,16070,2947,18194,462,16903,3885,19581,2312,19552,2779,17409,908,15280,1174,16046,430,16279,3725,18766,840,15147,4621,15039,378,19793,767,18027,3876,19278,3697,16568,4949,17892,2303,16598,523,17244,1888,17772,4305,15668,3007,16458,1821,16914,891,19319,1846,15578,2726,19663,48,16658,2851,19704,233,17646,1588,18081,2506,17244,3443,19111,783],[4951,17951,749,16682,2111,17061,2502,15184,2767,17212,4564,15982,4874,17210,167,17372,114,17613,1382,17749,917,15594,4816,17081,4135,18772,3523,16346,3586,18021,681,17703,1340,17291,3091,17551,3699,18052,786,18810,116,19074,443,19395,425,18785,4948,19685,3754,19798,2918,15718,3940,15824,1608,17793,3451,15644,4859,18387,4244,16052,4033,18297,103,16990,4140,19302,3360,15436,4113,17649,1037,17281,1642,15102,1719,18696,1925,15485,2591,17534,108,17600,1079,17322,2214,18264,3393,16485,2902,17615,2541,15850,3800,15390,3588,17823,4335,18753,252,16309,2859,17620,3490,18611,3846,15040,93],[3146,15712,1385,17218,2792,19757,2658,19639,1606,19894,3082,19493,3378,15960,71,15776,1899,19443,2619,19698,4467,17494,4398,15020,4954,15637,4803,17887,3747,17548,577,17962,3542,18902,2612,18721,3167,15283,186,18226,3433,17799,1812,17549,351,17551,1673,15501,4751,18322,667,17685,2810,15895,4288,19627,2632,16371,4277,19499,4739,16211,3636,19999,704,15703,3147,15460,695,15774,4201,18414,1772,19508,1842,16092,2930,19606,739,16009,1944,18368,864,16985,2277,16571,1196,15903,1
226,18949,2044,18002,3628,15452,1017,19933,1364,15424,1140,18551,277,15664,1311,19667,2077,19277,3359,17304,170],[2232,16946,3504,17635,3972,19959,1210,15616,841,17489,4489,19381,1315,19740,2503,18249,1844,15532,1148,18161,1384,17800,110,17631,4239,19140,4693,18512,1454,15215,2984,19212,283,18877,3653,17892,3374,16181,3772,18502,3541,15319,3643,18038,3400,15574,845,17187,1554,18754,2108,16156,2881,16299,4387,18202,2987,15745,4208,15357,860,19632,200,19725,2746,19134,4301,15451,1327,15093,774,15164,4472,16004,46,19280,4439,17713,3846,16685,3292,15598,3175,15812,1676,15150,869,15034,630,16705,1164,15492,3721,17479,3217,19609,4884,19259,3279,19242,3845,18998,133,17497,480,16196,2399,17569,3817],[1698,19918,2288,18488,3703,19609,2661,15744,1652,18419,296,19827,2843,17724,3815,16824,3178,19833,285,17803,2187,19763,1548,18874,3545,16365,2552,15828,1998,16166,2215,17883,1661,15783,4554,16747,1567,15643,1010,18008,2968,19124,2895,17518,1376,18761,4020,15314,4034,17689,2813,17863,454,16805,1171,19858,4359,15588,906,17905,40,17867,3892,16745,1867,15151,682,16840,3666,16901,1899,18470,3513,17039,2888,18176,239,18604,2831,15976,1233,16739,4628,19668,4262,17607,2445,16812,1937,15659,615,18481,3242,16559,4850,18445,1386,18261,1617,16288,775,16878,1799,15380,3530,17824,2027,19402,4441],[3638,16740,4648,19408,680,17289,1006,15809,3737,16310,2127,16992,4045,17121,2840,15895,3898,19833,4107,15579,3230,15552,2160,18975,2954,18552,4334,19428,2004,15462,2635,16612,4805,16074,249,16674,3873,16088,26,18604,2403,17510,2349,18979,398,18517,3849,17464,2976,19277,919,18963,1900,17983,4633,16458,1969,18581,156,16704,2054,15510,3094,15520,265,16458,4554,17802,3620,19649,832,16703,4090,16287,3632,19831,4380,15233,2710,18199,4907,16063,4920,18168,4126,15913,1982,15147,4577,18730,3934,15780,31,19512,2686,15581,4231,18100,2556,19911,2561,15983,4213,15030,3460,17628,2837,19720,883],[4472,16274,276,15213,3311,18474,4300,15428,236,18517,974,19440,1486,18682,3802,15616,3475,15296,140,15295,2124,15684,2
970,18301,4116,15377,159,15286,520,19832,3029,17942,328,18136,206,17651,4680,18175,3153,17628,4811,15175,2863,17037,1696,17005,2193,18897,3469,16722,56,17621,4497,18336,1741,18230,3737,18958,4099,15703,1456,18888,246,16659,424,16329,2193,19462,785,15180,3198,19269,3766,15308,4761,19341,3635,15723,236,18367,522,15718,4582,17757,2600,16995,4486,16806,3019,15448,3277,19095,3474,15994,4044,18824,781,18804,300,17962,3398,16335,447,17133,1482,17527,418,18753,4753],[555,18920,1967,15373,1597,19657,109,15489,2652,18369,990,15880,3347,16939,917,17056,163,16913,792,19868,2134,15357,4069,17411,2636,18395,3938,19814,1792,18687,1464,19308,1843,19464,1314,15857,3139,16860,603,16454,1258,15292,1174,18587,1656,18126,209,17074,2411,19637,535,17575,462,15977,2807,15113,2772,15494,2207,19258,4673,17768,15,19559,2160,17773,1556,15430,4151,16497,1353,19164,3422,18790,3288,18676,514,17053,4437,19523,4411,17219,4759,17510,4354,18038,2517,15401,677,18194,4186,19900,169,17852,3792,19906,4912,18753,954,17312,1997,15661,4821,19016,3178,18301,4472,15539,2446],[1081,16823,4442,19242,519,17239,3311,16580,3500,18794,4026,18182,380,18506,777,16784,4167,19154,237,16903,3701,15612,2650,17075,1661,17198,4607,17551,3932,16294,2651,19681,4133,17808,2858,16415,3397,17124,504,19926,4383,18245,4364,15246,1329,15530,3393,18748,2881,15967,3326,15463,33,16664,1845,15046,4990,15411,3201,17388,736,16139,279,17203,4661,19700,289,15525,2065,17849,4412,18801,2800,16394,2423,15612,2283,18941,39,15961,2245,17267,2007,16160,481,18147,1251,15309,3199,19556,1585,17105,2164,18020,3632,16502,4973,16682,411,17410,3081,15713,1599,19396,4102,16949,4463,17182,3388],[3064,19125,686,18791,487,18378,747,18525,4344,15902,895,17707,4140,17049,4514,16584,3165,15133,989,19048,4586,19520,4076,16457,3194,19948,4840,19701,4803,16573,3857,15545,1077,18579,348,16881,3600,17726,2843,17044,4243,15167,1430,19291,3399,15138,2586,17033,2230,17765,1246,17626,83,16496,2684,16182,1766,17652,1770,16197,1581,17039,3815,17866,4626,15517,3581,1636
7,1995,18369,3457,15371,909,18014,730,19277,4332,17370,477,18424,1447,19496,2144,15917,4497,19853,3977,17125,476,16628,2316,18714,3781,17476,2018,19660,3769,17881,4489,15739,2112,19621,2667,19484,1177,18788,3260,16876,2281],[1573,18907,3765,15056,2851,18923,4756,15882,1975,16447,1793,18620,4127,19830,2566,17717,3725,19683,4375,19077,2132,18938,2686,15961,4570,19265,4846,17846,3750,17633,756,17712,4904,19478,610,18922,198,18854,3196,16378,2476,19555,4632,17519,427,15461,4845,17262,673,15113,2759,19680,4374,17375,3343,17552,2684,19556,1871,18096,2240,15184,3829,19560,4581,19580,130,18254,4299,17924,3211,19545,1476,19925,1077,17079,2664,17511,3260,16077,3102,18674,3749,18488,4682,19118,4890,19496,2015,18888,1693,15567,129,17536,1630,16153,2577,17094,3012,16457,3216,18507,878,17885,2028,19013,2522,18259,2990],[1614,16157,1944,19968,4960,17240,3914,15249,3573,18408,3200,17876,2782,19470,4923,15114,3242,16119,4698,18528,1513,18398,1919,16364,231,15674,2710,17063,3468,17708,4211,16755,3651,18283,4410,18998,4147,19613,568,16928,2491,19818,2789,17282,3748,18532,64,18711,2453,16647,3438,15224,1281,19088,2044,18391,4494,17815,323,19917,3007,17460,2044,18793,2009,16042,2633,18893,3528,19927,1563,15484,1539,15864,1180,15819,719,19768,375,19326,1594,16006,4856,19283,3984,15060,2417,19979,4870,19258,3977,19598,1458,16247,534,18145,1610,16729,3883,18000,4991,15980,2037,17310,3980,17189,1693,16430,639],[4847,17028,3829,17197,4131,17765,1875,17297,2007,18325,3867,17218,2751,17090,175,19162,4573,15025,3918,19321,2754,19031,2609,16934,4660,17468,3136,17177,395,17437,1301,16081,2621,17515,612,19459,2926,17321,4728,18916,518,19679,2170,16534,355,15112,4020,16320,2209,18675,355,17028,3436,19089,1807,17272,4100,18919,3773,19949,3273,19282,3640,18663,2055,15367,3439,17331,1866,15305,29,18189,3313,18838,143,18569,3505,16337,2486,19507,1015,15441,2503,15068,104,19659,3384,19321,597,19423,4811,18762,1026,19307,282,17380,2012,18377,1771,15431,4592,17261,1202,16239,2137,18187,1531,17673,3548],[2
914,16032,2826,16485,1206,18534,2340,16590,3528,15924,1450,19382,4693,18115,1807,17410,967,18582,1347,19270,67,17792,1532,16473,1968,18213,3111,18403,3793,17117,4112,19079,3695,19493,1217,16013,575,15761,4549,19310,1726,17866,3175,19428,1177,18583,4856,15094,864,19875,3870,16799,2897,16097,403,16411,2885,19828,4289,17884,3199,17565,294,18924,1283,16744,4370,17341,2538,15998,3182,16940,4558,16865,3145,17067,499,17991,4589,16781,618,17638,3283,17897,3467,18555,1222,16700,2119,16395,3798,19433,3192,15178,2181,17879,2463,18682,1256,19776,4810,16012,25,18714,1869,18506,1081,19571,1535],[4108,15064,2110,16531,1179,15941,4929,19803,2599,15409,97,16186,3047,17733,3150,15501,3622,18450,1075,19712,2324,19682,2168,19151,393,18179,742,17388,2601,18491,1492,18784,3005,15199,1290,19334,876,19406,2946,18705,536,18775,2739,18399,1787,15032,969,17597,1895,18597,1716,18966,1789,16528,2997,19771,426,18021,651,15032,4291,16342,504,16786,3297,15879,1604,15800,2530,19713,343,15349,20,17116,2095,17409,2945,15803,763,15466,103,16813,1698,19156,110,15399,1495,18386,983,16168,3749,19427,4328,15222,4730,17037,2817,15398,2308,15576,764,15244,3988,15232,715,16975,2699,16817,908],[3994,15054,3993,18425,1063,19901,4404,17009,4416,18831,1727,19983,2624,19031,2174,16423,3035,19411,3214,18247,1902,17736,2539,16497,3143,16236,188,19750,2937,18110,1673,19462,499,17891,1236,15343,2940,17690,152,16994,1747,19221,4214,19360,2248,16861,4722,19376,2546,19228,313,18103,3843,15045,2116,17701,2326,18331,2950,17612,4351,19699,4720,15760,582,19694,3178,16308,4467,19254,1941,15612,728,19530,1119,16816,1235,17190,3587,19299,1174,16461,4015,19155,1237,16282,2522,18138,982,19423,3454,16567,3846,19402,4537,19919,1968,16985,2312,19460,2027,18502,80,18817,3864,19569,752,17337,2416],[4170,18448,2334,18566,410,19456,2788,18529,4674,19210,4503,18950,4886,17778,626,18012,3037,16024,2232,16361,1485,16067,4305,18564,4714,19938,4469,15505,207,19397,3085,16579,995,18485,3470,18761,54,15089,2492,19389,3396,19989,1302,19423,356
9,17328,851,19220,4231,17941,1705,15502,3029,19077,642,19931,1780,19243,1069,15587,3746,18767,1685,18320,4195,17356,1332,16222,2901,17367,4226,18110,2988,15268,2136,18364,2471,15465,4399,15863,2827,19304,3974,17249,2633,19509,3326,19152,1347,19440,4667,15722,2977,15610,4034,16373,1401,15682,2958,16652,4633,16562,4485,19551,2059,16456,2909,15416,845],[24,19406,4002,15341,1005,18471,3485,19442,2734,16562,1503,15925,667,19534,1836,18174,589,17094,1799,19790,1127,16426,4491,17311,351,16334,386,16739,32,15167,2699,18918,228,15632,2382,15770,3023,19328,4720,18090,782,17671,1447,19077,2834,17934,774,18676,3341,16748,3157,17529,3024,19908,3810,15095,4562,16782,4766,19007,1531,15307,3333,15033,3637,17868,3002,18592,708,15657,113,17594,964,16572,2795,17656,2494,19768,4232,17028,3891,15282,1185,17730,3538,17919,3295,17175,471,15335,4912,15969,3224,16447,4518,19715,507,16191,4425,17890,1561,16756,1311,19588,2907,19974,1883,18375,1207],[4508,17930,782,17236,28,17983,4411,17558,3988,16865,3075,18022,1761,18397,4824,18783,341,18382,2369,16444,1020,15381,1466,19620,1436,17607,1085,16644,991,16188,996,19235,4967,17038,1695,17826,1305,17055,172,15717,155,17389,3747,18311,2601,17770,1661,15762,2767,17072,1418,15553,1727,19855,3389,17444,4184,17355,173,15843,2744,17621,1338,19348,2345,15235,38,18580,4140,15871,1338,16873,750,17358,3239,18190,2743,17326,3940,15382,4392,19427,710,17093,2387,16465,4372,18041,2224,15304,3088,19260,1745,16154,3331,19375,4663,15401,4910,16787,3104,16756,2772,18750,3775,15455,2794,17438,710],[1826,17023,1790,18976,404,16773,2643,18571,3797,15334,3444,19655,1184,17353,1440,16297,2159,19825,3859,17746,1971,18954,4819,18101,4594,16019,3497,18750,4997,15622,4312,19226,2692,18169,3397,18379,1649,16513,96,16898,2687,15847,1819,19762,609,19814,4156,19899,1717,19990,2516,15648,4211,19439,4119,17264,54,16951,1510,16959,2550,19028,1396,16208,882,19078,1000,17470,2599,19607,324,15437,4154,17062,3773,16301,3502,19365,1400,17859,1295,17622,1183,19396,4427,17200,2387,19947
,2595,15411,4536,17240,2938,19173,2820,16409,2725,15185,3910,15261,1935,19102,1464,18762,693,18736,3883,15547,4196],[2597,15246,3603,18917,4371,17166,3837,17490,4037,19895,1440,16664,1862,18737,1749,17032,2243,18451,2393,15593,340,16389,2226,19529,2151,19630,2779,16947,4828,19198,2674,16932,4272,19033,3763,15775,2561,15904,2656,16527,370,15689,3704,15388,3146,19826,4642,18430,3710,17177,1371,15523,405,15283,2518,17570,3607,19621,1252,19129,2336,17602,779,15813,4498,16874,2054,15132,1209,19347,3413,19601,3719,19032,266,15721,3425,15384,2210,19856,2236,17256,1870,16007,917,19108,3826,17664,3388,18977,1992,19982,3509,18369,966,19282,4064,17138,2406,16487,2189,16798,3492,17690,2828,15930,2709,19001,1733],[3218,15884,247,17376,813,18219,4705,18177,276,19936,86,19445,158,19891,3677,15416,2938,18128,2624,18161,1870,18570,4749,15734,1058,19679,2275,17594,847,19955,1851,16098,2187,17432,2798,17928,1191,18306,4323,17854,3120,16851,3196,17471,2548,15393,1321,15730,402,16891,4827,16719,3237,18119,4613,17204,3597,16713,4826,18329,2881,16801,949,18646,337,17599,3159,19540,2171,16076,2721,18399,1785,19984,451,18935,659,16251,3736,16266,421,18659,745,19360,4772,15274,714,16829,1725,17521,4472,15702,3834,16048,406,18155,2750,15427,40,18826,1481,15877,3643,16168,3770,17716,2521,15112,4349],[64,18859,4233,18301,1785,18271,3141,19705,3541,15460,314,18241,1220,18422,2753,16250,2835,17175,98,15431,1184,17944,3782,19713,762,19683,1212,18179,2506,18190,649,19738,725,16837,534,18527,817,16032,937,16884,2153,18528,1573,18107,2006,15920,4144,16239,3596,18557,2008,16098,3269,18576,1762,17155,2305,16915,2555,18084,3009,17819,3929,16223,4127,19882,4467,16532,1901,16231,2418,18457,4091,17943,4274,17706,1453,18857,4283,15990,1447,19703,3590,16470,3323,19499,4529,16108,617,18917,3618,19361,2467,18697,1141,18588,4678,17867,4566,18930,4035,19251,2121,16480,3811,18096,3354,17215,3701],[4421,15903,2418,18569,3065,19046,55,18227,4463,18866,3914,16997,3825,18477,4141,18608,4263,15466,4010,15242,3781,1617
9,1857,19886,401,19774,4589,16021,1551,15941,1488,15319,3333,17730,1375,16564,4719,17200,3985,19725,4826,17877,660,18091,3157,16882,625,15462,242,18304,2905,18726,1466,18570,1964,18119,352,18656,1480,15647,1562,19658,2455,19468,2302,17848,3007,18968,1157,15220,401,18798,3897,17863,2726,15709,3297,17475,4766,15043,1545,17331,2314,18112,858,15080,3882,18817,4013,19933,867,15811,1115,16188,1341,16729,1845,19844,3237,17879,3124,15725,4271,19855,418,15441,2928,15255,2553],[2199,17271,1966,15003,3044,18776,4175,19363,766,16813,3910,18315,4899,15457,4349,18545,723,16627,1434,16552,4551,17295,4883,19658,1744,18051,3643,15384,3960,17016,4674,17581,3967,18631,175,17062,2551,18880,4499,19371,2457,18632,4256,17655,3756,18776,3019,16068,2382,18237,3633,18225,1569,18514,2119,19728,57,15160,1130,18516,2437,17489,1043,19213,3876,16968,4553,19461,3139,18263,1415,15261,4143,18073,965,19431,2058,16070,1250,18231,2309,18776,3359,15860,3808,18745,3039,15949,3241,18529,3,17927,3067,19735,85,19326,48,19885,899,19546,3719,17298,2945,18453,2658,17108,3606,17906,1983],[3130,15312,2139,15168,2639,15866,244,18792,3049,16359,4511,18231,3022,15381,3861,15174,4757,17261,4719,18433,4453,15514,3709,19630,481,16483,4436,17317,3660,18303,3498,17699,2390,16863,1202,17937,778,16417,4014,19710,1222,18196,2453,16685,3629,19186,1889,19996,1626,16421,4084,19794,3718,17528,4827,18047,3690,16293,1067,16738,4471,19363,772,16868,2094,17515,4813,18956,4469,17890,3358,18979,4557,19546,4560,18147,4368,15407,4409,19560,2387,19914,4173,16417,1557,18250,4240,19983,276,15016,2548,15881,1843,15709,459,18616,4225,17041,932,17256,3853,18076,4173,16773,3886,19529,1314,15444,4284],[742,18431,4023,18586,1406,19764,9,19646,4120,15331,2326,19499,2465,19961,2820,19438,1600,15646,809,16576,4034,16373,2674,18783,980,18003,2740,18953,963,17545,289,19298,3164,17632,1307,17886,3549,16284,4258,16131,4892,15416,526,18273,1758,17303,1116,17553,2394,15259,2090,17214,699,15108,2136,16358,2765,15928,4770,16629,4205,19123,2032,16104,156,
15462,4822,15231,3972,15110,4819,17836,4900,16737,3066,19602,3135,16010,3828,19003,767,18573,3316,16340,3500,15209,3563,16884,857,16798,3065,19755,2808,16608,4468,18733,476,17967,4815,16956,1414,18391,2210,15247,3788,18755,4744,17387,572],[3634,19108,187,15598,2146,18510,3469,19770,3325,18303,2639,19554,4455,18864,937,19456,2457,19528,2178,16925,142,16702,3515,16258,528,15941,1543,19196,4780,15215,1859,18758,2875,15379,1874,15472,2803,16083,4512,17057,1861,16772,3235,18714,4833,17608,1522,16358,4977,19842,1559,16291,4144,15512,718,16155,3815,16679,4637,18303,884,19715,3260,17682,1072,18298,3094,16868,1769,17665,2947,18018,4960,17207,4619,16193,1684,18055,71,18027,38,16955,3599,15380,3468,16804,685,19527,3002,19270,3500,15225,153,19538,239,19437,3232,18649,4780,18048,883,17282,3530,18944,2118,17058,2153,17101,3484],[2565,19569,4284,19069,2610,15609,297,15016,3113,15612,3674,17699,4419,17268,2378,17968,2598,18383,2576,19936,3363,16109,2874,19511,4920,16042,2560,19593,1084,17793,2361,16362,3400,19068,2581,17203,1485,16713,914,16041,481,17201,50,15911,4499,17425,4891,15713,854,16810,4341,16390,3721,17634,1306,15708,4997,18770,2943,15394,3586,19420,2465,19542,75,16623,411,19657,1607,19734,1136,17557,1492,16272,4487,17701,3540,18001,2292,15785,2067,19156,1604,19225,2990,16337,3988,18545,1200,17009,868,17468,1921,18857,1008,15652,4215,16726,3385,18935,165,19129,4405,15604,2385,17351,1692,17077,949],[3101,19000,355,16759,1218,19811,2120,19608,3867,18604,172,16130,1506,16535,1307,19758,138,19067,4193,18963,2064,17791,1142,15168,1543,17395,4906,19909,4246,16276,835,15298,959,15625,1371,17751,3420,15900,3352,15422,4342,19443,753,16084,4594,17239,122,17410,3840,15735,2890,17521,1942,17269,1570,19611,1784,16258,1223,17528,2182,18583,2381,17631,1440,17587,4078,15524,2857,19920,2912,17286,4720,15527,565,15489,478,15329,2934,16485,888,16792,2020,16470,1801,17302,520,15216,3976,15019,1043,19055,203,17854,1401,19341,3276,15529,50,16474,1301,17240,2703,16504,3332,15698,3649,18750,1869
],[2797,16495,2799,16449,3369,16781,1762,17654,1692,15675,673,18858,4492,18976,3232,15464,175,19973,4526,18423,3134,16963,3161,17725,2279,19049,33,19710,1079,18367,3035,18873,3618,16324,1312,17298,591,16953,215,17421,4438,16969,1980,19772,4091,15808,1993,19477,2694,15826,1419,19716,4870,16659,4148,16011,3811,17592,4655,17942,2443,19587,1184,15931,4716,15109,4668,16192,1417,15801,2975,18070,4843,19868,2821,15819,2126,16684,1011,17255,3752,19990,1011,19937,870,15641,3180,17563,2652,19549,3671,15458,968,16458,3154,16551,469,17008,467,17963,3396,15380,1619,16443,2393,19284,4304,18099,207],[3279,19580,1204,15951,583,18746,4140,15052,2227,16959,1416,16558,1510,17058,4290,17072,688,18130,4893,15656,1739,15129,169,17890,1156,19295,4399,15982,1241,18254,3852,16237,355,18781,3491,19290,2658,19574,490,16337,575,19232,909,18028,2338,19121,4377,17607,4173,16749,3051,16741,1679,19853,2815,16229,4542,18486,3849,19531,4973,17984,764,17346,2953,19447,1722,19004,3920,16391,3513,18411,29,17223,3429,18958,4395,17050,3629,19163,1845,18115,1067,15059,4370,18785,2446,19310,4485,18844,3370,18459,1600,17157,4971,19723,2200,18981,1139,18949,491,17624,4554,17880,22,18112,4336,15355,2945],[3784,16527,3878,17289,989,17298,867,16411,280,16008,376,15668,4453,18379,3574,19284,3521,18737,4933,15036,2578,17360,1181,17753,771,16102,2573,19102,188,18156,569,15429,1152,15915,3659,17938,2285,19875,1047,17000,3248,18987,2483,16498,3597,16647,1476,17671,4209,17175,738,15260,3769,17201,1610,16239,4315,15993,4625,18535,3558,17946,1218,17994,2996,18110,121,17173,4938,17647,2077,15425,3386,19157,1656,15690,4521,18020,4358,15605,1859,19477,370,15782,1196,17189,4586,16905,1837,16361,4834,18731,2346,17933,447,18710,3688,17601,2268,16176,2867,17716,3244,15294,1239,16684,2894,18528,2019],[1008,15335,3221,17141,1763,15760,591,19870,643,16234,3292,17218,2040,19156,3521,19063,1460,17162,3768,19037,4445,17726,1311,18470,782,19796,549,18075,1251,15014,823,19928,4399,18160,11,18965,87,19368,1220,19263,275,16887,1017,166
29,2360,19216,2460,18421,4004,15876,4876,19960,4351,18899,723,19814,1753,18584,2086,16417,1487,15627,730,19871,1395,17056,2528,17206,3810,15773,401,19271,1608,17115,4846,19865,1292,16281,900,19519,1301,18331,770,19274,4400,18354,1939,16269,2105,19387,322,17256,3704,18607,2768,18638,787,16365,2515,16355,1761,15376,4416,17971,4983,18527,1246,15233,1596],[718,17370,945,16328,3722,17295,359,15353,3515,19551,1002,19193,1734,18099,1970,19663,2919,18437,2226,17844,310,18983,2850,18405,4542,19786,3265,18110,1150,16445,4961,15982,2616,17287,1621,15998,2433,16107,1885,18825,4515,15880,2783,16842,3407,19070,4570,17022,4060,17126,3621,15719,422,17533,2839,17443,588,16478,413,17906,724,19955,4837,16522,128,18438,4051,17141,345,19052,4110,16406,4564,15625,1594,19733,3625,16320,3930,15195,4580,19716,2429,15853,3828,18271,4503,19695,3180,18193,4501,15546,1699,16689,1970,17485,1474,19317,2398,15707,4371,17738,2053,18108,1394,17911,2253,16996,211],[2896,19578,1659,19818,683,17467,1808,15184,3287,17108,4350,18568,1664,16467,4050,18137,3121,16547,2223,17565,3196,18125,1993,18750,297,15550,2656,15486,1878,18700,4639,16078,2433,17837,2607,18831,3676,17200,3857,18120,1679,19771,3302,19940,4758,18784,2176,18565,4457,18535,3992,16141,2601,15103,15,19758,3699,17459,2915,18852,4640,19381,384,15356,4763,19050,2787,18126,3520,18644,4400,18123,1173,19091,3470,15692,2617,15868,4268,19834,372,15118,564,18759,163,19524,4949,19046,1277,16852,1558,15423,3201,15554,1298,18975,729,18614,2834,18580,3651,16514,4185,18442,1941,15215,26,19767,800],[3183,18696,1939,16848,1755,16186,3685,19617,1985,19616,2974,16445,4133,19154,4565,17467,2680,15255,2504,16363,813,16496,947,18340,1976,16181,1440,17237,4458,18839,28,17804,2784,17977,4115,15039,2993,16092,1559,17369,2772,15638,1420,16858,1132,15479,1486,18886,1192,17261,981,17703,2399,17472,3099,16326,3092,17164,577,17278,2370,16749,4519,16445,1688,18400,3884,16487,220,19698,3872,19946,1367,16832,3514,19910,1539,19945,4208,16242,2787,16670,2935,15462,1512,19279,
3956,19747,3735,19634,462,15512,4838,18112,1157,19977,3684,17509,4915,18409,1680,18279,3602,18318,1839,18576,955,19171,1385],[1689,17288,3346,18382,769,19459,2490,19355,1323,19403,1539,18379,1619,17091,1545,19172,688,18965,4935,17747,4935,18220,1087,18629,832,16105,2058,16144,3164,19186,2167,15147,4031,16401,2161,18512,1411,16277,908,16853,585,19586,3867,19155,1609,18168,2088,18942,2340,17890,1678,19785,3262,16789,4291,18979,970,16186,4904,15965,3375,16153,764,16816,2669,15615,4910,15010,2760,15844,3168,15550,4263,19781,4634,17235,1357,17774,3443,16253,4808,16683,86,18054,2169,18541,3568,19845,3889,16104,4440,18898,2158,17731,1606,17325,1836,16887,3891,16209,1832,18347,2354,17623,1965,19712,743,19178,625],[2074,18108,1526,17134,4118,16556,3598,19722,1463,16253,3533,16928,3814,19794,4135,17284,988,18649,1517,15406,2988,15036,4349,17154,3852,18378,2847,19319,19,17471,2315,18479,241,16388,3015,15910,4447,17513,2282,17287,3763,19345,4416,18333,4898,18046,1290,17474,1684,18660,3203,19262,898,16426,2369,19909,1440,19529,429,19364,3818,18779,4431,15083,521,15194,1953,16617,1758,19867,518,15665,2479,15337,350,17434,3,19711,3415,18846,1527,19345,3893,18931,3708,18701,3116,19855,2860,18893,1018,15961,674,19803,1710,17445,4338,15987,3838,17084,2457,16533,2888,15988,1688,16500,2063,15446,1482],[2260,17572,3806,16825,2853,15957,1587,18321,3646,16492,4128,15014,569,19754,876,19860,1966,15073,3585,15241,3833,15706,2998,17258,1778,19482,343,17737,4964,16941,2178,19290,465,15627,86,17738,368,19715,1193,17612,1122,16711,2780,16676,347,18504,2970,15962,2080,19211,1482,19002,546,17412,3342,19313,4438,15386,2063,19220,1070,15740,4267,16898,3653,17341,4664,17722,4692,16515,1365,15944,279,19672,659,19562,208,16482,2705,16135,1339,19619,2671,17768,3297,18176,2547,15545,974,17049,2397,17217,1821,15746,3794,19654,1040,15944,407,17656,3537,15454,152,19642,2738,15005,557,19315,1690],[1505,17698,3393,17123,2490,19445,4714,16763,823,15851,4527,18486,2171,15704,1551,16082,4825,19663,3817,16991,44
5,19143,1448,16643,4108,18581,3075,19537,651,18793,4946,16742,1631,17577,3591,19557,753,17061,4692,15104,4096,18069,2104,17158,4243,15488,4984,15253,134,15993,3551,15136,790,18288,2270,18229,4790,15511,4320,16937,1491,16910,4088,15802,2452,15213,2188,17417,1543,15652,4636,18127,923,17897,3663,15450,2932,19678,1729,16542,1201,15514,1147,19672,4235,15397,1381,15933,2148,17025,195,18808,3628,18803,4822,16077,3118,19486,2676,17312,3798,15921,4813,17678,919,18136,1972,16676,1172],[4136,19219,3792,17708,81,19618,1435,19991,1402,15502,3790,19005,3494,19247,85,18618,4100,19792,3402,16549,2647,16510,1863,16353,4075,15499,772,16299,2529,18861,1867,15276,4278,16770,153,19807,2248,19158,2124,18894,2210,17231,2646,16024,4118,18719,2803,17877,4169,19494,3933,16344,866,16460,3269,18374,4064,16394,3478,18436,1985,17066,3091,16922,3460,16347,4063,18804,4844,15329,560,17509,4127,16757,2688,19148,4111,15415,3328,19477,1894,18735,3206,16690,723,17555,3129,19682,1120,17070,3970,16495,400,19275,4287,16690,3948,16834,2392,16149,1887,18527,1598,15540,1919,15453,1116,19200,1889],[3241,17255,4551,18206,570,19819,2146,19501,2021,18683,1867,19643,470,17253,3220,17927,4250,15647,1326,18838,4150,19360,4011,19565,407,15046,4374,15025,886,18922,166,15726,4864,15632,1932,18125,742,19572,2598,15228,3645,19445,2609,18979,2621,18412,2276,19462,1857,19837,4913,17367,1921,17176,3007,16271,3867,19665,3508,16338,488,16880,3384,16135,3800,17764,2632,16524,4898,19182,4265,17865,2776,18196,4859,19216,1684,16078,1752,16563,2579,15721,2200,16585,1296,19642,3606,17227,3039,17391,2304,15395,2521,16177,2961,18627,1053,15927,3970,18551,697,15360,274,16344,542,18010,641,17874,2238],[422,16684,2149,15272,1991,16616,3421,18351,4948,17387,2608,17324,1381,16691,95,17666,1948,15759,4534,16832,1025,18805,3715,16415,1815,19290,1738,17256,389,18136,2325,15807,1273,18556,3091,18072,3573,15684,726,18596,500,19108,1929,17125,4031,17275,138,17946,4092,18709,3805,17309,1270,18660,1502,17993,3314,17866,4433,19476,4345,18874,4769
,17093,1031,17784,4407,17960,1139,18707,2426,15910,1070,16861,103,17059,4087,18488,4305,18871,3249,18979,3884,16416,2735,18756,3151,15740,3608,17906,4751,19991,36,17348,3653,17336,1604,17881,308,15957,1167,18114,2573,15057,1840,15226,4339,18268,3975],[123,15880,861,15110,2764,19157,850,19070,3442,16432,4067,17523,393,15775,11,19263,4923,16522,943,19231,2737,19831,1222,17397,4289,18648,1068,19916,1181,17794,282,18696,2073,17397,2178,17779,1355,18138,3371,15286,4393,17835,4568,16058,1519,19143,1644,17435,3394,19205,2334,15204,2407,15876,4024,15515,370,19711,2429,16298,4566,18641,2976,19985,2216,18610,730,16090,1548,18916,1989,15130,959,15574,3948,18748,4204,16308,3465,19740,3768,16114,202,15478,2506,15980,3535,15269,2652,19977,3746,16241,1660,18750,1288,16405,320,15283,1343,18204,37,18438,2487,15213,4527,17121,2634,15137,4136],[2866,17379,4903,18096,4404,19874,3549,17645,1582,15448,1859,15870,841,15648,860,19329,965,17732,955,15353,132,18842,2434,15196,4236,17286,4491,16242,2403,17019,1061,18479,4497,18523,4071,19236,4460,17927,2021,19059,4476,16653,2345,16272,3140,15870,2830,16762,4877,19840,992,15479,2273,17781,353,15938,167,19678,4653,19209,758,16872,10,17203,4764,18841,114,18957,1326,15596,2970,17216,2571,16201,2910,18398,1977,16509,3040,16201,1809,17146,3922,18869,1394,16808,2818,18146,4672,19211,3173,17556,3129,17527,1006,18036,4263,19501,4835,19547,3532,19390,2220,18440,3379,17139,3013,15357,193],[1802,18939,1417,17040,4270,18720,1925,17964,4976,18327,442,18538,2510,15594,1926,16818,153,17428,815,15335,74,17957,247,16321,1361,17049,3271,16494,2405,17787,3859,15563,3074,17336,294,19019,1576,18047,3875,17481,378,18434,4877,15021,2853,18335,3079,17865,4039,18974,3661,16215,2137,17720,4406,16023,4000,17520,2230,17306,3254,18387,1563,16654,509,17128,3899,16797,1983,18006,1396,19746,2620,17952,616,16465,2932,19859,2427,15368,1619,16977,4744,15386,4903,16221,2483,16534,617,15234,222,17199,1806,16467,4153,19258,3871,15478,2036,19461,4361,19997,3751,15515,45,18600,793,1
8286,3352],[323,18591,3288,16077,3706,17892,1825,16350,4960,16335,1084,15156,3627,19207,1286,18019,4305,17278,671,17708,4925,18288,1272,17016,4564,18903,2054,16751,2934,19690,4926,16925,2524,19252,3499,16992,4013,19352,3702,18381,1685,15891,1436,15650,1520,16261,602,18322,3028,15132,1060,15623,2251,15105,2643,18474,2870,16520,2841,17569,4809,18414,1020,18592,1012,16586,2922,16421,743,17740,4993,15150,4463,15507,4454,17074,4785,17340,1133,19909,2667,16921,115,15569,2875,16872,2326,18038,1327,18612,2556,19801,866,16743,4311,15804,4757,19107,2763,18375,4613,19858,75,17433,2012,18573,1122,17606,1215],[2140,18962,2437,17541,2961,19695,2913,15987,3357,19289,317,17505,637,15695,3593,15594,4156,19244,3285,18310,1684,16399,4989,17437,4065,15906,4251,18387,651,19582,105,16905,1617,16829,705,17226,479,16001,1339,18457,4779,16873,3917,15907,1630,18781,3347,18720,4088,19447,211,17643,985,19346,3951,17960,4316,15636,4973,18185,3208,16990,932,19147,4899,15118,2556,15820,1377,17375,2391,16283,1346,17701,4553,19053,110,16330,4518,16202,1017,17092,417,17321,3374,18898,3668,15903,688,16139,1941,16142,3676,17636,2607,15340,4237,15044,2186,17027,638,17584,3322,19483,3709,15832,3978,18343,3222],[1101,17113,4321,15633,3735,18330,1871,15820,219,19604,2099,17811,282,17305,1622,19796,3601,17832,4805,15880,4494,18602,4988,16372,3629,18511,3360,18305,4027,17541,150,15499,4603,16202,592,19815,1454,19183,4121,18244,1499,19613,1014,16835,3305,15631,1081,19141,173,15565,2978,18427,3540,15342,3798,18570,317,16595,2740,19993,3259,19645,1707,17040,1167,18815,597,16790,2621,15406,2131,17797,1748,15503,2243,19053,3824,16116,969,19866,2199,15815,2433,19736,3264,16768,3868,18610,195,19479,3740,19408,2999,15557,4813,19112,1340,19185,1043,19533,4686,15580,1725,16513,2753,16385,1062,17302,1773],[2227,15203,669,17588,207,18945,2781,16210,2443,19794,2621,15193,1268,16569,1858,18565,3205,16994,2790,19237,3426,17816,2217,18350,2024,18338,3821,17089,4162,19832,2556,19977,4286,15580,4494,17937,627,17540,4492,1640
0,820,16349,2117,19774,3674,16706,1316,17006,486,15037,3492,18689,4874,17386,205,18202,1209,15767,3666,15465,4877,15247,3564,18004,541,19065,3206,19383,3402,16390,2571,15232,4651,16844,214,17856,2733,15505,3579,18398,4780,17080,94,15104,271,19697,3205,17753,140,19821,1832,16806,1316,16679,2590,18840,1822,15184,3673,16039,1599,15028,2364,16137,3074,18217,2642,17544,3616],[242,19346,4871,19818,4782,15373,1881,16413,2561,16009,3914,16693,2623,16091,4589,18364,2738,16627,4305,18895,3155,18395,3080,19492,2262,16577,332,18205,3163,18953,956,18244,4008,15726,3303,16866,4375,19409,2405,19537,2286,16311,3778,19280,673,16186,4015,17713,922,18152,1420,18870,2473,16273,2544,18684,4853,15552,2614,15917,4373,15248,1753,17067,1191,19651,4268,17389,1625,15748,679,17715,2650,18893,3236,17307,2770,18609,1513,18181,347,19009,2305,19997,1160,19874,3546,17761,1203,15963,791,19867,3461,15701,3802,18854,771,15513,4735,18673,937,19846,3009,18240,3917,16804,1281,16789,1848],[2043,19600,3578,18733,1550,18325,1577,19847,2879,16400,4594,19710,3957,16550,1673,15518,2107,16552,1561,17328,177,15821,4108,18131,2400,16564,165,15984,2878,17368,1498,16046,144,19888,869,18751,1530,17839,2421,16263,3754,15920,2466,18289,2965,18216,3975,16775,340,17024,1242,17531,4820,19597,489,17136,3076,17765,1352,15777,13,16186,1476,19749,4040,17896,3185,16453,1410,18637,4632,15712,4962,15392,4173,15337,1408,19759,1786,16933,2962,16253,3305,16235,4133,19783,2403,17800,4469,16399,4797,15168,3416,17070,696,16557,3169,17659,4456,19636,4106,17157,3664,17400,4427,17156,3780,16285,205],[4445,18349,2558,16946,365,18139,1779,15093,3794,17064,1912,15691,2071,18185,1843,18208,1628,19699,1369,15992,1476,18475,3840,17460,321,16068,963,18841,2979,19397,2003,19899,2098,19205,4610,19711,2214,18766,1389,16960,1345,18786,3349,17297,2433,18828,3643,16351,4142,19097,752,18606,3800,17382,4884,16076,2418,18777,4675,17679,4379,18614,1144,15102,2470,15545,2657,17659,121,18934,4145,16234,1066,17479,4080,19845,4230,19488,1785,17622,1883,1686
4,2861,18650,1419,19623,1436,19102,670,15804,1174,18220,228,19001,72,18384,1978,18634,537,19848,3219,17823,1024,19191,1994,16232,1233,16755,2071],[692,17125,4607,18399,1265,19641,571,16809,2451,15008,3786,17236,4442,19286,4842,16479,696,16356,4445,18032,2065,17604,603,16143,499,15046,4887,17635,3209,19490,3362,18585,2713,15195,2039,16520,4719,16185,3736,19308,4357,15497,3183,17004,1610,18868,2061,18576,1785,17205,776,17440,1768,15725,1370,19476,1423,17031,4566,18891,2496,19684,4915,18312,3164,18738,4571,19608,1318,18589,3701,16752,4036,15846,702,17897,2131,17551,3559,18169,327,15703,1741,15221,1851,17156,1321,17216,1530,15709,3420,19573,3277,16130,4094,15821,4883,16450,3720,15350,2667,17063,2287,18164,3217,19083,258,18502,503],[3460,19936,2708,19238,3725,17636,3532,17003,4728,19604,1347,19442,3185,17715,2577,19478,1158,15448,1076,15229,232,18605,4789,19083,2792,17257,1398,18723,3828,17369,3325,17750,1803,17584,4223,19017,3443,16953,4879,19945,1568,19599,3399,15679,2487,17190,652,15364,4276,17097,4493,19430,1278,16392,4462,16901,4564,15921,710,18816,2878,18210,44,16396,2186,19240,1414,19313,1075,17444,2747,19495,4599,15450,2384,18937,1004,18718,3452,18143,2661,16857,3906,16443,1918,15112,690,15960,1154,16041,4843,19816,2059,18782,4091,17794,4773,19812,2931,16695,3615,17955,3023,17086,539,16484,294,19019,1562],[1764,15648,391,18100,1912,17937,4627,15774,2116,16412,3797,18637,183,17010,4504,18686,1647,15703,4944,16900,2242,17926,4270,17261,782,15421,3939,18162,1828,16950,2012,16494,1753,15639,3974,17980,756,16363,3208,17554,3519,17603,930,17516,2951,18252,3788,18735,4597,15235,2198,16342,180,17264,3175,15106,428,19368,2218,19895,1025,16181,2594,18056,3777,17413,4634,18623,1578,15040,1403,15881,2056,15657,3669,16397,775,17872,228,17041,337,15481,4581,19351,1402,18291,2215,16543,2264,16335,2831,17574,3250,15433,282,15169,4513,18298,4248,18104,4423,15942,727,16575,2351,17299,3321,19101,4686],[951,19873,1349,17261,2021,17950,4436,19283,1246,18781,3187,19381,3467,18933,546,
16133,3819,19092,423,17624,3128,19172,3958,17515,33,15881,760,15105,1621,19959,3218,16216,1699,18388,3002,17909,384,19462,2254,16931,4840,18790,132,19683,4178,15308,3684,18163,3261,17136,1928,18479,1719,16036,1562,18514,1145,16009,3891,19813,426,18643,4279,15044,3311,17167,1045,16270,1536,18230,2010,17789,373,17248,3294,15394,661,19412,2574,17578,3764,16260,562,15397,4608,15415,1713,19310,3970,16260,2199,18593,222,18858,1344,15997,625,18342,2251,19165,2388,16876,2226,18013,4676,17883,1828,15657,1874],[4664,18792,2918,18959,537,18152,3566,15680,2205,19667,1037,16807,1982,19843,3026,17248,1190,17827,1666,18064,491,15823,262,17770,1082,15126,1114,16847,1190,17729,67,15807,4053,17004,1969,16501,310,17266,3126,15872,2833,15398,1798,16448,1378,18994,3982,19819,4708,16061,3240,15651,3910,19605,4450,16956,2997,19986,16,16030,1877,16174,3769,17879,3681,19529,4893,18892,3216,15257,3884,17134,329,15901,257,16141,3563,15313,2304,15346,4880,19195,4495,16948,739,19065,3467,18599,199,17687,4336,16882,1175,17831,2918,15402,699,15974,3188,18826,4199,15607,3897,18437,3872,19685,3204,18186,1509],[743,18640,245,15972,2945,17168,774,17097,3659,19667,3941,16934,437,18705,4550,19171,2445,15776,1747,19435,4175,16555,2545,18832,3422,17772,2603,17852,2537,19363,3677,17505,1552,16350,1443,18994,325,15846,205,18283,3034,18352,418,18226,4891,15387,3862,18189,4491,17525,1000,16815,757,19461,311,15394,3131,18243,4011,19593,2745,18053,4999,15145,1180,16392,4206,17047,3047,17791,4885,15186,2498,18252,3015,19525,4188,17936,1745,18337,2657,16795,2880,18465,1418,15620,4782,16480,3189,17727,1408,19402,192,19810,241,16659,1382,17958,2382,17981,2398,17626,419,17124,2401,17360,1143,16564,2860],[4950,17848,2434,19695,4961,18378,492,15649,656,15521,2799,19841,904,15832,1118,16757,843,17480,4088,15576,3686,18141,4294,16730,384,18641,1700,15619,3229,18960,2237,17460,778,19669,3280,16710,1484,19731,1519,16124,3091,15644,3142,18752,2678,19187,4542,18348,2936,15092,288,17895,4140,18562,4448,18694,2629,19691,4002
,17912,893,18151,810,18576,3835,16529,3198,18285,17,17445,2395,19495,1921,16713,4003,16868,3074,15241,4012,19717,4480,19534,1580,18561,1028,17991,4457,18508,4324,19775,501,18663,469,18303,302,16027,3199,17336,3412,19916,4054,15691,4036,18933,3561,16136,2128,17380,2790],[1048,18394,4729,15473,3332,15428,2772,16084,993,15448,4288,15717,3377,15890,1049,18939,788,17606,1653,15038,1939,17549,4451,17605,476,15615,625,16543,452,15020,436,18135,388,18612,296,18784,1237,18288,3044,18062,1565,18384,3569,17811,1853,18176,3356,18367,1138,17825,4152,15066,3899,15749,3974,18478,3050,19221,2694,17456,4173,18096,2113,16291,2146,18733,1791,18249,2864,19763,4991,16346,1059,18166,840,17690,2352,15997,3741,16983,1182,19071,3131,19975,701,19501,320,15757,2568,16702,3958,16416,2311,18072,3856,19323,4486,19416,1316,15663,1728,16508,1574,16411,4700,16752,1698,17323,3710],[2757,16185,3176,16425,1427,16515,1224,18397,3409,16224,3706,15142,488,18953,2378,18077,3361,16089,2904,17314,390,15823,3907,19978,3654,15807,4281,19859,2341,17127,1878,16234,1255,16653,3722,18091,1610,18728,320,17254,3926,17776,1699,18347,3816,15378,2435,16751,3221,16175,1069,19162,2949,15063,4982,16628,4978,18950,940,15033,3697,18797,3174,15761,1162,17358,4352,15444,4251,17568,3826,16497,4411,18452,849,15299,2891,19281,4003,17128,4657,16568,2603,15050,4230,18171,2771,17637,4897,19116,3529,18531,2435,19844,1679,19415,4109,17664,4378,15142,977,18316,3244,16123,86,16585,1997,15191,4634],[4658,18566,77,15919,947,17255,1206,16003,4417,18542,3474,15060,1498,15154,4451,18494,3450,17337,3656,15512,4925,18444,3378,17818,4386,18329,4532,17742,2192,15123,1869,16122,3156,19859,2902,16394,932,19992,4143,18498,3865,17732,1846,19360,941,19560,2831,16860,59,19816,2738,17715,3351,17900,2237,15115,3248,19876,3770,19182,4097,17102,2565,18329,3959,16473,3209,18508,2346,18820,1140,19367,3791,15813,998,19797,965,18613,1188,16069,3225,16725,1956,15058,4076,18029,4024,19383,2517,18363,1583,16080,4787,16462,3269,16057,555,18306,1505,18346,3153,1
7619,2175,18353,1880,15665,3925,18526,2967],[4580,17272,3405,17846,2704,19798,4837,18910,2634,16006,1306,19284,1036,16766,3662,15632,2703,15631,126,16527,2854,17980,4858,18197,800,15180,1872,16898,2811,18838,4407,17617,565,17118,1562,16992,3778,19424,1163,15159,2093,17073,4883,15908,3922,16161,1827,17251,2663,18191,2827,17731,2385,19538,1008,17085,3185,15869,4039,19799,4141,17196,3777,16312,1593,19178,4251,19265,2682,19425,4065,17059,4489,15892,2354,18591,4608,19246,2995,19779,4164,19281,2652,17841,672,16084,1212,18946,3184,17676,4092,18197,2880,15019,2811,19182,1752,17492,1441,19769,3364,17042,3636,15796,4161,17163,1912,16090,4352],[240,19430,2272,15047,548,15365,261,17064,2005,16576,4207,18663,91,15013,735,19489,236,16356,444,19229,4807,15818,3293,15010,3133,17681,3756,15747,1296,17040,1270,17499,2005,19252,1266,15681,1342,15362,615,17025,3116,17542,3248,17106,1602,15419,4844,17873,773,19254,2502,18368,1606,18964,4217,19547,16,19392,3374,15447,1870,18163,2458,17606,4840,19789,1396,15064,2547,17379,3027,17044,1214,15071,4347,18685,60,17254,3782,17913,299,19330,4700,16284,451,19831,1919,19539,2650,17157,4067,17289,2657,16172,4219,19352,4387,15176,141,19592,1083,17794,1375,18106,827,17726,1368,18077,2223],[1903,16235,1168,17197,2698,17505,3153,16596,4304,15531,3833,16013,466,15895,1203,15587,2927,18333,2071,16163,2540,15972,4655,19136,1467,18629,3896,18904,1822,17259,3840,17920,1894,18996,1378,15430,2721,18812,2268,17885,448,17780,3055,18760,921,17953,2303,18291,4191,16068,1668,16834,3352,17767,3087,15881,3013,19788,317,15080,4384,15464,406,18568,2321,16807,2970,19175,4618,15549,3195,16472,4855,19185,1781,19383,210,15576,4407,17451,3132,17700,1555,19221,2811,17732,3678,15896,3825,18351,2105,18469,3937,19148,1080,17018,1904,19550,1235,15564,1555,19295,3647,16917,4454,16484,3460,17051,3041],[4689,18369,1711,19074,2597,15940,4387,15549,4066,19321,2793,15394,76,15621,1505,15895,2506,16206,1150,19183,3316,17937,3891,15723,154,19959,3552,15843,1950,18705,507,16976,3629,177
54,2635,17321,788,17866,3828,15817,3518,19755,175,18487,2897,17425,786,15819,3075,19433,2213,16074,77,19379,1770,15208,4259,15402,280,17260,4037,15106,3519,16882,3455,17488,2538,17454,3797,16088,2548,15758,2768,18252,1352,16522,1355,15142,370,19314,2956,19440,2110,16700,3195,17009,811,17681,3820,17883,806,17530,3975,17280,1431,17702,2231,15241,2028,18583,4220,17515,3199,15943,1110,18456,4104,15031,1385],[602,16615,3647,18104,771,19720,2873,19740,4205,15689,4458,18899,2544,16229,900,16300,2974,17802,2072,19039,2326,16554,4097,18665,4663,15465,1970,15111,179,19591,4982,16785,1432,16008,3427,16060,1408,18096,4126,15553,1536,17554,4024,19537,320,18165,3589,19346,3722,16267,1266,15304,3243,19239,4014,17642,2999,15908,4761,18905,2473,16065,2906,18808,4234,16049,4361,18727,4418,17298,3134,19793,4670,18712,117,18210,3097,18291,3276,16090,3308,18456,2977,17763,2207,17653,4026,19531,2313,18217,356,18479,3715,18557,4884,17407,2356,18374,4076,18004,704,19143,912,17219,1750,18829,3426,17852,695],[3456,19390,4120,16521,208,19216,1069,18459,644,19794,3338,19081,4157,18148,236,15748,2792,17853,2440,16523,2030,19396,2580,17230,4014,16429,2574,19646,3310,17631,1711,19902,3870,15757,4175,19505,3536,15338,4243,16564,1154,16315,3482,19887,4434,19701,4648,17130,3380,19262,1278,19996,3976,18154,3824,16120,4505,16239,760,15994,3418,16512,1514,18603,1968,18133,1947,19468,2090,16265,2155,18224,2510,19129,4502,16996,3138,16738,3084,19898,1719,19868,545,15888,2833,18506,1574,16635,3613,18730,46,16830,1064,18130,2998,17330,1286,17061,3225,15830,1318,17576,721,18167,392,17763,4068,18244,4631],[1115,17358,117,15319,1889,18822,1684,18727,4526,15498,1782,17871,3459,19277,76,19583,2397,17276,542,16359,1069,16252,1329,18050,3186,17448,4120,17329,2820,19991,3499,15343,120,15843,651,18316,1559,18943,792,15581,2467,18055,835,17196,4623,19856,1576,16299,3480,19559,2072,17868,2334,18034,1999,15065,1574,18389,3849,18460,3215,17879,2376,17506,4050,17041,2510,16106,2369,16399,2399,15359,3925,19462,3577,15482
,4634,19907,4033,18825,80,19712,184,16601,118,19528,2918,17023,1848,16143,4659,15644,304,15636,3404,17336,4405,16881,55,16282,1246,19576,4445,19197,315,18840,3689,17102,4377],[2637,16548,676,19029,3281,17977,3663,18240,1145,18790,323,17193,228,16879,777,16555,1625,15414,4134,17064,1585,19806,1629,17611,3956,16380,2032,19233,3143,19126,4608,19422,1417,19679,4357,15229,1398,17797,3576,19774,355,18733,794,19703,4401,15379,2083,18432,3320,18538,1113,18185,402,15714,726,19861,3787,19755,2890,15968,1652,18422,113,16719,1850,16527,57,17596,4947,15071,4981,19254,3494,15417,1772,16044,2022,16269,3155,18363,2876,17437,3241,17847,4005,18293,507,19475,1259,17887,4739,16268,4998,16165,571,18959,3458,18966,2010,17064,1715,19934,2937,16104,2108,19321,2651,16314,908],[276,17599,268,15155,1541,18029,1270,17566,1027,16276,485,19237,3792,18460,3564,15341,3151,16880,3252,17269,1204,15490,3060,18293,4234,17337,4058,16383,3438,15775,1449,19001,4338,16328,4375,17154,3735,19868,3,16724,1355,19467,3909,19138,3125,18066,248,19621,2784,15132,4834,16568,3397,16763,1945,19289,3289,17188,1484,15601,694,16338,256,18560,363,17467,1247,15239,4820,18880,4309,19744,2090,19457,4460,17256,1349,18847,2419,18194,551,15980,2999,18573,3784,16897,27,16718,3746,18453,1668,17314,1623,17268,2851,18960,3791,19272,4141,18284,2730,16831,968,18761,2637,18782,678,17105,1272],[3580,19431,523,15399,3967,15494,179,18900,3822,18187,4842,16669,1125,16623,3000,18452,3087,16134,4495,17907,3440,16406,1104,16572,850,15120,1980,18536,2423,17478,1194,18948,95,15033,4590,19689,4713,16150,2452,15734,3195,18003,2677,15082,4362,16477,467,15066,2614,19385,2930,17240,3455,17421,2028,17190,4928,19108,2058,17752,2101,17623,3953,16048,2386,17338,1512,16636,4286,18558,3455,19423,3147,18578,2465,15261,1408,17042,3097,16315,4173,17663,3469,16207,4044,16526,3652,16047,4395,19446,4520,17983,2507,16438,4392,17395,4042,16651,1791,15411,4259,19380,2541,15087,4427,18716,1878,18490,3831],[3620,17563,2032,19121,4851,19935,1751,17866,3770,15329,1
262,18218,1803,16521,2196,15374,2758,15475,2832,16953,3231,15882,932,15141,4214,17651,3783,16708,1341,16095,2203,18094,4283,19642,2304,19635,4983,15621,2290,17230,1778,19215,757,16366,436,15328,4522,15946,3445,17674,4331,15890,4945,16347,1445,19735,4798,18213,2067,18398,4518,18115,4071,17382,4373,15975,296,15183,2415,18084,2629,15061,384,19998,488,19072,2773,17014,3606,17425,4688,17921,2473,18055,2236,18479,854,19974,926,15718,4956,15109,2912,18244,976,18144,4199,16370,2904,19706,2236,19958,402,18789,4590,19804,2562,15042,3166],[1892,15784,2142,18778,4234,15371,78,16992,1634,19885,112,18703,1267,18980,3004,19559,2755,16041,3293,18819,636,18610,4419,16867,1190,19075,3933,16195,2485,17043,3065,18328,3531,19127,2085,17880,3223,17096,465,19633,1356,17231,3368,19406,4250,19195,3535,15609,947,18336,4533,15291,4941,15072,3502,18334,4651,18298,4520,16595,4082,17316,4675,17774,3161,15283,4630,18554,4248,15366,2539,17566,1450,15029,4595,19176,1873,18254,3698,15272,4258,18390,3945,19805,4682,17567,3508,19595,1633,15234,483,17487,1352,19666,2188,16953,3001,17406,4246,15751,3813,18169,3380,17307,3725,15457,4222,17569,2009],[4141,15728,3568,16146,4073,17827,321,18717,3196,18805,3382,19763,2954,15777,1971,17514,1791,18793,4569,19024,4848,16455,2046,16900,3998,18153,360,18233,2587,18605,2004,18425,2607,17185,801,17432,2766,15325,2472,18548,4393,15513,1000,18354,4132,16944,2392,17793,4293,19029,2397,17350,928,15143,4289,18929,580,18135,2293,18083,4679,16253,4882,16701,79,19291,4858,16865,1435,18192,3992,15978,4168,15350,3839,19400,3978,18012,706,17659,4106,19678,4592,18783,2716,18142,219,15601,2567,17868,1869,15704,729,18018,1218,16266,3118,19746,3323,18527,2625,15807,2169,16279,4914,17091,4769,19341,1983],[3785,15440,4252,15247,4973,15280,1185,17799,2143,19102,216,17392,3446,19650,4634,19271,714,15972,3644,18143,4515,18031,3745,15269,1335,19786,3278,16513,2317,19936,1825,16118,3245,15715,1208,15428,1446,17644,3739,16184,3224,19814,2285,16978,867,19103,4694,18594,1676,16422,2392,166
25,1971,17099,241,16797,205,18466,2176,15893,2149,18763,3616,19862,1966,19712,4967,19037,3406,19725,2080,19511,3595,16393,3708,17090,1565,19210,1953,18707,987,16065,117,17364,2305,16645,1315,15416,2878,17219,4904,16414,3471,17670,3157,15885,4446,15925,3137,15402,365,19350,4643,19915,556,15204,2493,18175,2849],[765,18173,596,19604,442,18945,3263,16056,1292,15040,2359,16992,804,18373,4053,16088,4145,17125,1084,16717,3119,18074,3838,17221,1238,16144,3817,16509,4996,18131,763,19407,2333,17740,4878,19998,3834,18768,2936,18599,116,16498,2998,19985,3127,18047,3820,16684,132,18963,1146,19248,1401,19064,2062,18277,3740,15710,2350,18674,1046,16600,4379,16127,1607,19691,2002,16779,4452,16164,4170,15209,4873,16058,3210,17392,1322,17175,4610,15845,2272,17072,1142,15400,1700,19587,4183,19860,920,18358,2162,19246,4451,18895,2951,18488,1347,19304,2889,17813,4058,15463,4562,17608,339,16939,317,19428,2745],[1021,15541,1059,18225,2699,19699,1855,16481,712,18249,4325,15961,3480,19607,3989,18098,3157,15946,3618,18665,2206,16804,1979,15493,4213,17831,1216,15383,3255,18235,1992,16237,3676,16361,4375,16126,3193,17612,2174,17172,948,17885,2317,16316,1145,15778,1032,15912,638,17504,3896,18501,1629,16448,2621,19943,1927,15687,2524,15764,2626,15387,4836,15939,1565,18904,3948,17197,4768,19382,2174,18788,719,15393,911,17246,3726,15117,1431,19722,777,17792,1722,15054,1632,18251,2931,18643,4341,17846,3287,17561,116,18643,4408,18233,2800,16811,2866,15599,2827,15814,4449,15118,2052,16339,3295,18307,500],[312,17992,4545,16251,846,19370,2967,15754,1975,19652,771,19180,1433,16119,3270,16104,4157,19267,4996,18057,4001,19013,2745,18834,4278,18220,1761,17830,1487,19523,4116,18434,2022,19871,634,18844,2188,15044,1632,16307,331,16974,1413,19961,777,15951,897,19356,777,17088,3691,18034,2333,15404,468,17218,1764,18813,3799,16651,2730,19271,922,18945,2897,18660,2279,15350,2687,17559,1126,19951,2883,16020,1378,17505,3947,19342,120,15353,3602,15959,4753,16225,1763,17240,2220,15644,2360,15923,2665,15383,2601,1680
2,2841,18977,2285,18477,3907,15120,4923,18259,2584,19620,3643,19355,1888,18254,4421],[3218,15064,3132,19868,2435,18783,4822,19950,4243,18762,3826,16098,4233,18924,3305,18042,1865,15537,3240,16436,4436,16679,4895,15462,3070,16773,1035,16811,4270,18306,2664,18438,1292,19085,2004,18728,3475,17160,1298,15238,3782,17145,1021,17900,3628,19225,706,17670,2263,18067,2952,16617,4574,17621,4530,19680,1771,19259,2803,17907,1410,18490,3294,17925,2826,15091,451,16516,3265,17969,63,17354,1291,18809,3797,18208,1751,19730,1679,19734,3083,18590,2204,16357,1342,19669,2038,19464,1978,15747,1963,18438,1375,19030,4880,16797,847,18048,3274,18845,2680,16160,4044,16228,2606,15858,4482,19412,4015],[2808,18980,4066,19678,1230,15503,1644,17681,966,16282,1354,18699,4636,16977,1043,17790,2201,17020,4443,16981,2669,19934,2071,17838,2785,18863,3132,18089,2412,17527,4185,17555,3606,16047,3317,17025,4790,17704,3632,15332,3011,16821,3217,19736,766,16292,1574,18615,3514,16181,136,16908,1965,17789,2700,17316,3278,18012,332,17446,2754,18092,1616,19287,3465,15239,1727,15332,2565,18738,4897,19859,1373,18147,1063,18437,2148,16402,922,15947,2326,16372,3784,15866,2163,17913,1827,16846,3670,19189,2327,17166,4428,16898,3645,15277,3301,17124,4918,17045,4965,19028,3733,19342,486,17047,1969,19775,311],[3775,19944,4138,19729,1148,18203,1416,19722,1217,16539,2935,16409,3280,17111,4039,18360,3968,16529,150,18353,3388,18768,711,15783,4026,18159,3335,17846,1981,18621,2087,16193,3306,17821,1154,16932,1420,15215,780,19906,4064,17157,1661,19569,2808,15021,3422,17543,4707,17122,794,16898,574,17266,3727,18950,3865,15965,2889,17693,1333,17512,1377,15077,357,19289,3203,18240,93,16898,1679,18868,2134,18696,2642,18388,4662,15089,905,17411,4895,15709,2384,19647,4197,16765,1627,18251,4447,18035,145,15241,3791,17157,3272,16243,30,16601,1040,16570,1651,18118,652,17194,4329,16460,1593,19128,406],[4101,19276,4908,18008,2801,15783,3883,18038,969,15579,529,19530,1747,18724,1497,16499,853,17983,1829,19988,1003,17132,4787,18843,759,1698
8,3588,19725,4384,16738,1025,17303,4656,18497,638,15359,4433,17530,2270,19109,2944,18918,2797,17367,1873,17157,3255,15400,3009,16895,2206,19952,2655,19653,30,18674,599,19011,2185,19086,542,17633,4467,19902,3658,18987,4914,18251,4375,19471,2252,18560,2948,15648,3294,17331,852,19518,4845,16397,626,19208,1516,18517,2437,17431,4661,19165,3988,18923,2998,18149,1683,15526,4629,16162,3110,18821,4284,15585,4507,15574,3768,19569,1573,16129,4458,18780,4297],[1529,17071,4944,18267,4913,16809,2553,19732,3529,16395,1156,16824,1470,16609,4827,19629,1906,19967,3517,15822,4212,18846,910,16126,1010,16117,1370,16686,2566,19323,4991,17677,2565,18367,3371,16394,374,19379,2867,18894,569,17404,4740,17400,66,19767,3039,18659,1660,19805,4142,15890,481,16788,1209,16445,3981,15932,3912,16808,4443,17001,436,17761,3512,18519,97,19584,1886,17520,3627,19383,1402,16907,80,15451,1310,19578,2464,18355,2357,19666,4684,19259,4715,16362,2054,18308,4693,15621,1118,18788,3466,17461,3450,17176,1596,19508,4287,19967,4090,19164,2576,19688,2262,18680,2270,17983,2124],[139,19517,15062,19084,1771,18626,17018,18220,23,19747,15356,16369,1019,19296,19099,15850,4016,17854,17172,17665,146,17302,18072,19126,4537,17302,15130,15446,2604,16315,16672,16275,2005,18111,17383,16448,2314,17457,18240,18853,168,17789,18914,15951,3920,19239,15961,15867,4698,17461,17781,15548,3823,17484,19443,16070,2874,18726,17218,19362,3969,19286,16565,18372,749,18487,19744,17479,3232,18169,17026,19774,2923,18746,17310,17917,1910,18997,18088,15817,322,19732,15776,19336,1588,18650,18345,15721,262,19838,19208,17998,4195,15357,16614,16138,2733,17958,17819,17652,3087,17484,19422,15582,3436,17881,19792,18803,1238],[1513,3149,549,2317,4713,2258,2086,14,1923,1454,1486,1448,3526,4045,2572,2099,4521,4549,3553,4835,1521,2570,1578,4079,903,3132,3763,235,4691,4095,3106,4428,2120,558,1125,1363,4654,4680,2809,780,4933,2037,4290,3720,3976,4896,409,4758,1228,508,673,2712,2977,1542,4298,752,475,3833,3306,3880,29,4671,1444,2701,1405,3719,2348,1372,904,1070,77
9,1224,4623,1095,4270,2646,1877,1756,508,476,4400,2391,4094,2540,2116,3258,2268,3593,78,4606,3230,243,3606,1501,1787,3757,687,411,1231,4464,2849,3730,828,13,2291,1831,576,2166,4527]], 41564259),
])
def test_trapRainWater(heightMap, expected):
assert Solution().trapRainWater(heightMap) == expected
assert Solution().trapRainWater_fh(heightMap) == expected
| unlicense |
hkchenhongyi/django | tests/template_tests/filter_tests/test_make_list.py | 345 | 1611 | from django.template.defaultfilters import make_list
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils.safestring import mark_safe
from ..utils import setup
class MakeListTests(SimpleTestCase):
    """
    The make_list filter can destroy existing escaping, so the results are
    escaped.
    """

    def assertRendersEscapedList(self, template_name):
        # Every template below renders mark_safe("&") through make_list and
        # must yield the same single-element character-list representation.
        rendered = self.engine.render_to_string(template_name, {"a": mark_safe("&")})
        self.assertEqual(rendered, str_prefix("[%(_)s'&']"))

    @setup({'make_list01': '{% autoescape off %}{{ a|make_list }}{% endautoescape %}'})
    def test_make_list01(self):
        self.assertRendersEscapedList('make_list01')

    @setup({'make_list02': '{{ a|make_list }}'})
    def test_make_list02(self):
        self.assertRendersEscapedList('make_list02')

    @setup({'make_list03':
            '{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}'})
    def test_make_list03(self):
        self.assertRendersEscapedList('make_list03')

    @setup({'make_list04': '{{ a|make_list|stringformat:"s"|safe }}'})
    def test_make_list04(self):
        self.assertRendersEscapedList('make_list04')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the make_list filter function."""

    def test_string(self):
        # A string is split into the list of its characters.
        expected = ['a', 'b', 'c']
        self.assertEqual(make_list('abc'), expected)

    def test_integer(self):
        # An integer is stringified first, then split into digit characters.
        expected = ['1', '2', '3', '4']
        self.assertEqual(make_list(1234), expected)
| bsd-3-clause |
David-Amaro/bank-payment | account_banking_mandate/models/payment_line.py | 10 | 3509 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Mandate module for openERP
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester <[email protected]>,
# Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class PaymentLine(models.Model):
    """payment.line extended with a reference to a direct debit mandate."""

    _inherit = 'payment.line'

    # Mandate used to debit this line; the domain restricts the picker to
    # mandates that are currently in the 'valid' state.
    mandate_id = fields.Many2one(
        comodel_name='account.banking.mandate', string='Direct Debit Mandate',
        domain=[('state', '=', 'valid')])

    @api.model
    def create(self, vals=None):
        """If the customer invoice has a mandate, take it
        otherwise, take the first valid mandate of the bank account

        Only applies when a debit order is being built (context key
        'search_payment_order_type' == 'debit') and the caller did not
        already supply a mandate in ``vals``.
        """
        if vals is None:
            vals = {}
        partner_bank_id = vals.get('bank_id')
        move_line_id = vals.get('move_line_id')
        if (self.env.context.get('search_payment_order_type') == 'debit' and
                'mandate_id' not in vals):
            if move_line_id:
                line = self.env['account.move.line'].browse(move_line_id)
                # Prefer the mandate attached to the customer invoice, and
                # align the line's bank account with that mandate's account.
                if (line.invoice and line.invoice.type == 'out_invoice' and
                        line.invoice.mandate_id):
                    vals.update({
                        'mandate_id': line.invoice.mandate_id.id,
                        'bank_id': line.invoice.mandate_id.partner_bank_id.id,
                    })
            # Fallback: the invoice branch above did not set a mandate, so
            # take the first valid mandate of the selected bank account.
            # (The second 'mandate_id' not in vals check matters: the
            # invoice branch may have just added it.)
            if partner_bank_id and 'mandate_id' not in vals:
                mandates = self.env['account.banking.mandate'].search(
                    [('partner_bank_id', '=', partner_bank_id),
                     ('state', '=', 'valid')])
                if mandates:
                    vals['mandate_id'] = mandates[0].id
        return super(PaymentLine, self).create(vals)

    @api.one
    @api.constrains('mandate_id', 'bank_id')
    def _check_mandate_bank_link(self):
        """Reject lines whose bank account differs from their mandate's."""
        if (self.mandate_id and self.bank_id and
                self.mandate_id.partner_bank_id.id !=
                self.bank_id.id):
            raise exceptions.Warning(
                _("The payment line with reference '%s' has the bank account "
                  "'%s' which is not attached to the mandate '%s' (this "
                  "mandate is attached to the bank account '%s').") %
                (self.name,
                 self.env['res.partner.bank'].name_get(
                     [self.bank_id.id])[0][1],
                 self.mandate_id.unique_mandate_reference,
                 self.env['res.partner.bank'].name_get(
                     [self.mandate_id.partner_bank_id.id])[0][1]))
| agpl-3.0 |
deanhiller/databus | webapp/play1.3.x/framework/pym/play/utils.py | 1 | 10462 | import sys
import os, os.path
import re
import random
import fileinput
import getopt
import shutil
import zipfile
def playVersion(play_env):
    """Return the framework version read from framework/src/play/version.

    play_env -- play environment dict; only 'basedir' is used.
    """
    play_version_file = os.path.join(play_env["basedir"], 'framework', 'src', 'play', 'version')
    # Use a context manager so the handle is closed deterministically
    # (the previous version leaked the file object returned by open()).
    with open(play_version_file) as f:
        return f.readline().strip()
def replaceAll(file, searchExp, replaceExp, regexp=False):
    """Replace every occurrence of searchExp with replaceExp in *file*, in place.

    file       -- path of the file to rewrite
    searchExp  -- pattern to look for; treated as a literal string unless
                  regexp is True
    replaceExp -- replacement text (backslashes are protected so re.sub does
                  not interpret them as escapes in the literal case)
    regexp     -- when True, searchExp/replaceExp are passed to re.sub as-is
    """
    if not regexp:
        replaceExp = replaceExp.replace('\\', '\\\\')
        # re.escape neutralises *all* regex metacharacters; the previous
        # hand-written escaping only covered $ { } . and broke on ( ) [ ] * + ? |
        searchExp = re.escape(searchExp)
    for line in fileinput.input(file, inplace=1):
        line = re.sub(searchExp, replaceExp, line)
        sys.stdout.write(line)
def fileHas(file, searchExp):
    """Return True if any line of *file* contains the substring searchExp."""
    # Open/close the file explicitly so we can stop at the first match.
    # (The old fileinput-based version could not return early because the
    # file only got closed after iterating to EOF.)
    with open(file) as f:
        for line in f:
            if line.find(searchExp) > -1:
                return True
    return False
def secretKey():
    """Generate a 64-character alphanumeric application secret key.

    Uses random.SystemRandom (backed by os.urandom) instead of the default
    Mersenne Twister PRNG: this value is a cryptographic secret, and the
    default generator's output is predictable.
    """
    rng = random.SystemRandom()
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join([rng.choice(chars) for i in range(64)])
def isParentOf(path1, path2):
    """Return True when path1 is an ancestor directory of path2.

    The relative path from path2 back to path1 consists solely of '..'
    components exactly when path1 lies strictly above path2.
    """
    try:
        relpath = os.path.relpath(path1, path2)
    except Exception:
        # os.path.relpath raises (e.g. ValueError for paths on different
        # Windows drives); treat any failure as "not a parent".
        return False
    # re.escape handles the Windows '\' separator that the old code
    # doubled by hand.
    pattern = r'^\.\.(%s\.\.)*$' % re.escape(os.sep)
    return re.match(pattern, relpath) is not None
def isExcluded(path, exclusion_list=None):
    """Return True when *path* lies under any directory in exclusion_list."""
    if exclusion_list is None:
        return False
    return any(isParentOf(exclusion, path) for exclusion in exclusion_list)
def getWithModules(args, env):
    """Resolve the --with=mod1,mod2 command line option to module directories.

    Each requested module is looked up under <basedir>/modules, first as an
    exact directory name, then as the first versioned '<name>-<version>'
    directory. Exits the process if a requested module cannot be found.
    Returns the list of resolved absolute module paths.
    """
    withModules = []
    try:
        optlist, newargs = getopt.getopt(args, '', ['with=', 'name='])
        for o, a in optlist:
            # BUGFIX: getopt returns long options *without* the trailing '=',
            # so compare with == '--with'. The old test `o in ('--with=')`
            # was a string-membership check that only matched by the
            # accident of '--with' being a substring of '--with='.
            if o == '--with':
                withModules = a.split(',')
    except getopt.GetoptError:
        pass # Other argument that --with= has been passed (which is OK)
    md = []
    for m in withModules:
        dirname = None
        candidate = os.path.join(env["basedir"], 'modules/%s' % m)
        if os.path.exists(candidate) and os.path.isdir(candidate):
            dirname = candidate
        else:
            # No exact match: fall back to the first '<name>-...' directory.
            for f in os.listdir(os.path.join(env["basedir"], 'modules')):
                if os.path.isdir(os.path.join(env["basedir"], 'modules/%s' % f)) and f.find('%s-' % m) == 0:
                    dirname = os.path.join(env["basedir"], 'modules/%s' % f)
                    break
        if not dirname:
            print("~ Oops. Module " + m + " not found (try running `play install " + m + "`)")
            print("~")
            sys.exit(-1)
        md.append(dirname)
    return md
def package_as_war(app, env, war_path, war_zip_path, war_exclusion_list=None):
    """Package the application and framework as an exploded WAR directory.

    app                -- the application being packaged; must expose check(),
                          modules(), getClasspath(), readConf() and .path
    env                -- play environment dict ('basedir', 'id')
    war_path           -- destination directory of the exploded WAR
    war_zip_path       -- optional path for a zipped copy of the WAR (or None)
    war_exclusion_list -- directory fragments to skip when copying sources

    Exits the process with an error message on invalid arguments.
    """
    if war_exclusion_list is None:
        war_exclusion_list = []
    app.check()
    modules = app.modules()
    classpath = app.getClasspath()
    # --- argument validation -------------------------------------------
    if not war_path:
        print("~ Oops. Please specify a path where to generate the WAR, using the -o or --output option")
        print("~")
        sys.exit(-1)
    if os.path.exists(war_path) and not os.path.exists(os.path.join(war_path, 'WEB-INF')):
        print("~ Oops. The destination path already exists but does not seem to host a valid WAR structure")
        print("~")
        sys.exit(-1)
    if isParentOf(app.path, war_path) and not isExcluded(war_path, war_exclusion_list):
        print("~ Oops. Please specify a destination directory outside of the application")
        print("~ or exclude war destination directory using the --exclude option and ':'-separator ")
        print("~ (eg: --exclude .svn:target:logs:tmp).")
        print("~")
        sys.exit(-1)
    print("~ Packaging current version of the framework and the application to %s ..." % (os.path.normpath(war_path)))
    # --- WAR skeleton ---------------------------------------------------
    if os.path.exists(war_path): shutil.rmtree(war_path)
    if os.path.exists(os.path.join(app.path, 'war')):
        copy_directory(os.path.join(app.path, 'war'), war_path)
    else:
        os.makedirs(war_path)
    if not os.path.exists(os.path.join(war_path, 'WEB-INF')): os.mkdir(os.path.join(war_path, 'WEB-INF'))
    if not os.path.exists(os.path.join(war_path, 'WEB-INF/web.xml')):
        shutil.copyfile(os.path.join(env["basedir"], 'resources/war/web.xml'), os.path.join(war_path, 'WEB-INF/web.xml'))
    application_name = app.readConf('application.name')
    replaceAll(os.path.join(war_path, 'WEB-INF/web.xml'), r'%APPLICATION_NAME%', application_name)
    # BUGFIX: compare string values with != instead of the identity test
    # `is not ""`, which only worked through CPython's interning of "".
    if env["id"] != "":
        replaceAll(os.path.join(war_path, 'WEB-INF/web.xml'), r'%PLAY_ID%', env["id"])
    else:
        replaceAll(os.path.join(war_path, 'WEB-INF/web.xml'), r'%PLAY_ID%', 'war')
    # --- application sources -------------------------------------------
    if os.path.exists(os.path.join(war_path, 'WEB-INF/application')): shutil.rmtree(os.path.join(war_path, 'WEB-INF/application'))
    copy_directory(app.path, os.path.join(war_path, 'WEB-INF/application'), war_exclusion_list)
    # Strip directories that must not ship inside the WAR.
    if os.path.exists(os.path.join(war_path, 'WEB-INF/application/war')):
        shutil.rmtree(os.path.join(war_path, 'WEB-INF/application/war'))
    if os.path.exists(os.path.join(war_path, 'WEB-INF/application/logs')):
        shutil.rmtree(os.path.join(war_path, 'WEB-INF/application/logs'))
    if os.path.exists(os.path.join(war_path, 'WEB-INF/application/tmp')):
        shutil.rmtree(os.path.join(war_path, 'WEB-INF/application/tmp'))
    if os.path.exists(os.path.join(war_path, 'WEB-INF/application/modules')):
        shutil.rmtree(os.path.join(war_path, 'WEB-INF/application/modules'))
    copy_directory(os.path.join(app.path, 'conf'), os.path.join(war_path, 'WEB-INF/classes'))
    # --- libraries ------------------------------------------------------
    if os.path.exists(os.path.join(war_path, 'WEB-INF/lib')): shutil.rmtree(os.path.join(war_path, 'WEB-INF/lib'))
    os.mkdir(os.path.join(war_path, 'WEB-INF/lib'))
    for jar in classpath:
        # 'provided-*' jars are expected to be supplied by the container.
        if jar.endswith('.jar') and jar.find('provided-') == -1:
            shutil.copyfile(jar, os.path.join(war_path, 'WEB-INF/lib/%s' % os.path.split(jar)[1]))
    # --- framework templates -------------------------------------------
    if os.path.exists(os.path.join(war_path, 'WEB-INF/framework')): shutil.rmtree(os.path.join(war_path, 'WEB-INF/framework'))
    os.mkdir(os.path.join(war_path, 'WEB-INF/framework'))
    copy_directory(os.path.join(env["basedir"], 'framework/templates'), os.path.join(war_path, 'WEB-INF/framework/templates'))
    # modules
    for module in modules:
        to = os.path.join(war_path, 'WEB-INF/application/modules/%s' % os.path.basename(module))
        copy_directory(module, to)
        # Remove everything a module does not need at runtime.
        if os.path.exists(os.path.join(to, 'src')):
            shutil.rmtree(os.path.join(to, 'src'))
        if os.path.exists(os.path.join(to, 'dist')):
            shutil.rmtree(os.path.join(to, 'dist'))
        if os.path.exists(os.path.join(to, 'samples-and-tests')):
            shutil.rmtree(os.path.join(to, 'samples-and-tests'))
        if os.path.exists(os.path.join(to, 'build.xml')):
            os.remove(os.path.join(to, 'build.xml'))
        if os.path.exists(os.path.join(to, 'commands.py')):
            os.remove(os.path.join(to, 'commands.py'))
        if os.path.exists(os.path.join(to, 'lib')):
            shutil.rmtree(os.path.join(to, 'lib'))
        if os.path.exists(os.path.join(to, 'nbproject')):
            shutil.rmtree(os.path.join(to, 'nbproject'))
        if os.path.exists(os.path.join(to, 'documentation')):
            shutil.rmtree(os.path.join(to, 'documentation'))
    if not os.path.exists(os.path.join(war_path, 'WEB-INF/resources')): os.mkdir(os.path.join(war_path, 'WEB-INF/resources'))
    shutil.copyfile(os.path.join(env["basedir"], 'resources/messages'), os.path.join(war_path, 'WEB-INF/resources/messages'))
    # --- optional zipped copy ------------------------------------------
    if war_zip_path:
        print("~ Creating zipped archive to %s ..." % (os.path.normpath(war_zip_path)))
        if os.path.exists(war_zip_path):
            os.remove(war_zip_path)
        # Renamed local from 'zip' to avoid shadowing the builtin.
        archive = zipfile.ZipFile(war_zip_path, 'w', zipfile.ZIP_STORED)
        dist_dir = os.path.join(app.path, 'dist')
        for (dirpath, dirnames, filenames) in os.walk(war_path):
            if dirpath == dist_dir:
                continue
            if dirpath.find('/.') > -1:
                continue
            for file in filenames:
                if file.find('~') > -1 or file.startswith('.'):
                    continue
                archive.write(os.path.join(dirpath, file), os.path.join(dirpath[len(war_path):], file))
        archive.close()
# Remove, anywhere under *root*, every file or directory whose basename is
# listed in *filenames*. (shutil.copytree's "ignore" argument would cover
# this, but it is not available in Python 2.5, which this tool supports.)
def deleteFrom(root, filenames):
    for entry in os.listdir(root):
        full = os.path.join(root, entry)
        if entry in filenames:
            delete(full)
        elif os.path.isdir(full):
            deleteFrom(full, filenames)
def delete(filename):
    # Remove *filename* from disk: whole tree for a directory,
    # plain unlink for anything else.
    if not os.path.isdir(filename):
        os.remove(filename)
    else:
        shutil.rmtree(filename)
def copy_directory(source, target, exclude = None):
    """Recursively copy *source* into *target*.

    Skips hidden files/directories (names starting with '.'), backup
    files (names starting with '~'), and any file whose directory path
    contains one of the substrings in *exclude*.
    """
    if exclude is None:
        exclude = []
    if not os.path.exists(target):
        os.makedirs(target)
    # Matches a '.x' path component (Unix or Windows separator) while
    # deliberately letting relative '..' segments through.
    hidden_component = re.compile(r'/\.[^\.]|\\\.[^\.]')
    for root, dirs, files in os.walk(source):
        relative_path = root[len(source):]
        if hidden_component.search(relative_path):
            continue
        for name in files:
            # Belt-and-braces: also reject roots with a dot component in
            # their absolute path, exactly as the relative check above.
            if root.find('/.') > -1 or root.find('\\.') > -1:
                continue
            if name.find('~') == 0 or name.startswith('.'):
                continue
            # Exclusions are matched against the directory path only
            # (not the file name) for the moment.
            if any(root.find(pattern) > -1 for pattern in exclude):
                continue
            src_file = os.path.join(root, name)
            dst_file = src_file.replace(source, target, 1)
            dst_dir = os.path.split(dst_file)[0]
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            shutil.copyfile(src_file, dst_file)
def isTestFrameworkId( framework_id ):
    """Return True if *framework_id* identifies the test framework.

    Accepts the exact id 'test', or 'test-<suffix>' with a non-empty
    suffix (total length >= 6, i.e. at least one character after the dash).
    """
    # Use the builtin len() instead of calling __len__() directly.
    return (framework_id == 'test' or (framework_id.startswith('test-') and len(framework_id) >= 6))
| mpl-2.0 |
lgp171188/fjord | fjord/feedback/migrations/0002_make_products.py | 7 | 2510 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_products(apps, schema_editor):
    """Forward data migration: seed the Product table.

    Creates the initial set of products; every product is created
    enabled with empty notes. Fields not listed for a row are left to
    the model defaults, matching the original per-row create calls.
    """
    Product = apps.get_model('feedback', 'Product')
    # One dict per product, in the original creation order.
    product_rows = [
        dict(display_name=u'Firefox', db_name=u'Firefox', slug=u'firefox',
             browser_data_browser=u'Firefox', browser=u'Firefox',
             on_dashboard=True, on_picker=True),
        dict(display_name=u'Firefox for Android',
             db_name=u'Firefox for Android', slug=u'android',
             browser=u'Firefox for Android',
             on_dashboard=True, on_picker=True),
        dict(display_name=u'Firefox OS', db_name=u'Firefox OS', slug=u'fxos',
             browser=u'Firefox OS',
             on_dashboard=True, on_picker=True),
        dict(display_name=u'Firefox Developer', db_name=u'Firefox dev',
             slug=u'firefoxdev',
             browser_data_browser=u'Firefox', browser=u'Firefox',
             on_dashboard=False, on_picker=True),
        dict(display_name=u'Loop', db_name=u'Loop', slug=u'loop',
             on_dashboard=False, on_picker=False),
        dict(display_name=u'Firefox 64', db_name=u'Firefox 64',
             slug=u'firefox64',
             browser_data_browser=u'Firefox', browser=u'Firefox',
             on_dashboard=False, on_picker=False),
        dict(display_name=u'Firefox Metro', db_name=u'Firefox Metro',
             slug=u'metrofirefox',
             on_dashboard=False, on_picker=False),
    ]
    for fields in product_rows:
        Product.objects.create(enabled=True, notes=u'', **fields)
def remove_products(apps, schema_editor):
    """Reverse data migration: delete the products seeded by make_products."""
    Product = apps.get_model('feedback', 'Product')
    # Slugs of every row created by the forward migration.
    seeded_slugs = [
        'firefox', 'android', 'fxos', 'firefoxdev',
        'loop', 'firefox64', 'metrofirefox',
    ]
    Product.objects.filter(slug__in=seeded_slugs).delete()
class Migration(migrations.Migration):
    """Reversible data migration seeding the initial Product rows."""

    # Must run after the schema migration that creates the Product model.
    dependencies = [
        ('feedback', '0001_initial'),
    ]

    # RunPython(forward, reverse): make_products seeds the table,
    # remove_products undoes it on rollback.
    operations = [
        migrations.RunPython(make_products, remove_products),
    ]
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.