repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
synergeticsedx/deployment-wipro
|
common/test/acceptance/tests/lms/test_lms_acid_xblock.py
|
15
|
5952
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from unittest import expectedFailure
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.xblock.acid import AcidView
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(UniqueCourseTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
self.setup_fixtures()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def validate_acid_block_view(self, acid_block):
"""
Verify that the LMS view for the Acid Block is correct
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
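# A minimal sketch of adding another scenario (hypothetical names; the real
# tests below follow this same shape): subclass XBlockAcidBase, flip
# __test__ to True, and install a course fixture containing an acid block.
#
#   class XBlockAcidExampleTest(XBlockAcidBase):
#       __test__ = True
#
#       def setup_fixtures(self):
#           CourseFixture(
#               self.course_info['org'], self.course_info['number'],
#               self.course_info['run'], self.course_info['display_name']
#           ).add_children(
#               XBlockFixtureDesc('chapter', 'Section').add_children(
#                   XBlockFixtureDesc('sequential', 'Subsection').add_children(
#                       XBlockFixtureDesc('vertical', 'Unit').add_children(
#                           XBlockFixtureDesc('acid', 'Acid Block')
#                       )
#                   )
#               )
#           ).install()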
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
def test_acid_block(self):
"""
        Verify that all expected acid block tests pass in the LMS.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
def validate_acid_parent_block_view(self, acid_parent_block):
super(XBlockAcidChildTest, self).validate_acid_block_view(acid_parent_block)
self.assertTrue(acid_parent_block.child_tests_passed)
def test_acid_block(self):
"""
        Verify that all expected acid block tests pass in the LMS.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
acid_parent_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid_parent]')
self.validate_acid_parent_block_view(acid_parent_block)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidAsideTest(XBlockAcidBase):
"""
    Tests of an AcidBlock with an acid aside attached
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
@expectedFailure
def test_acid_block(self):
"""
        Verify that all expected acid block tests pass in the LMS.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
acid_aside = AcidView(self.browser, '.xblock_asides-v1-student_view[data-block-type=acid_aside]')
self.validate_acid_aside_view(acid_aside)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
def validate_acid_aside_view(self, acid_aside):
self.validate_acid_block_view(acid_aside)
|
agpl-3.0
|
jonathonwalz/ansible
|
lib/ansible/plugins/inventory/virtualbox.py
|
16
|
7523
|
# This file is part of Ansible,
# (c) 2012-2017, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
'''
DOCUMENTATION:
name: virtualbox
plugin_type: inventory
short_description: virtualbox inventory source
description:
- Get inventory hosts from the local virtualbox installation.
- Uses a <name>.vbox.yaml (or .vbox.yml) YAML configuration file.
options:
running_only:
description: toggles showing all vms vs only those currently running
type: boolean
default: False
settings_password_file:
description: provide a file containing the settings password (equivalent to --settingspwfile)
network_info_path:
description: property path to query for network information (ansible_host)
default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
query:
description: create vars from virtualbox properties
type: dictionary
default: {}
compose:
description: create vars from jinja2 expressions, these are created AFTER the query block
type: dictionary
default: {}
EXAMPLES:
# file must be named vbox.yaml or vbox.yml
simple_config_file:
plugin: virtualbox
    settings_password_file: /etc/virtualbox/secrets
query:
logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
compose:
ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from subprocess import Popen, PIPE
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_bytes
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
''' Host inventory parser for ansible using local virtualbox. '''
NAME = 'virtualbox'
VBOX = "VBoxManage"
def _query_vbox_data(self, host, property_path):
ret = None
try:
cmd = [self.VBOX, 'guestproperty', 'get', host, property_path]
x = Popen(cmd, stdout=PIPE)
ipinfo = x.stdout.read()
if 'Value' in ipinfo:
a, ip = ipinfo.split(':', 1)
ret = ip.strip()
except:
pass
return ret
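    # Illustrative output of "VBoxManage guestproperty get <vm> <path>"
    # that the code above parses (not captured from a real run):
    #
    #   Value: 192.168.56.101
    #
    # which yields ret = '192.168.56.101'; unset properties print
    # "No value set!", leaving ret as None.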
def _set_variables(self, hostvars, data):
# set vars in inventory from hostvars
for host in hostvars:
# create vars from vbox properties
if data.get('query') and isinstance(data['query'], dict):
for varname in data['query']:
hostvars[host][varname] = self._query_vbox_data(host, data['query'][varname])
# create composite vars
if data.get('compose') and isinstance(data['compose'], dict):
for varname in data['compose']:
hostvars[host][varname] = self._compose(data['compose'][varname], hostvars[host])
# actually update inventory
for key in hostvars[host]:
self.inventory.set_variable(host, key, hostvars[host][key])
def _populate_from_source(self, source_data, config_data):
hostvars = {}
prevkey = pref_k = ''
current_host = None
# needed to possibly set ansible_host
netinfo = config_data.get('network_info_path', "/VirtualBox/GuestInfo/Net/0/V4/IP")
for line in source_data:
try:
k, v = line.split(':', 1)
except:
# skip non splitable
continue
if k.strip() == '':
# skip empty
continue
v = v.strip()
# found host
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
current_host = v
if current_host not in hostvars:
hostvars[current_host] = {}
self.inventory.add_host(current_host)
# try to get network info
netdata = self._query_vbox_data(current_host, netinfo)
if netdata:
self.inventory.set_variable(current_host, 'ansible_host', netdata)
# found groups
elif k == 'Groups':
for group in v.split('/'):
if group:
self.inventory.add_group(group)
self.inventory.add_child(group, current_host)
continue
else:
# found vars, accumulate in hostvars for clean inventory set
pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '):
if prevkey not in hostvars[current_host]:
hostvars[current_host][prevkey] = {}
hostvars[current_host][prevkey][pref_k] = v
else:
if v != '':
hostvars[current_host][pref_k] = v
prevkey = pref_k
self._set_variables(hostvars, config_data)
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith('.vbox.yaml') or path.endswith('.vbox.yml'):
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
cache_key = self.get_cache_prefix(path)
# file is config file
try:
config_data = self.loader.load_from_file(path)
except Exception as e:
raise AnsibleParserError(e)
if not config_data or config_data.get('plugin') != self.NAME:
# this is not my config file
return False
source_data = None
if cache and cache_key in inventory.cache:
try:
source_data = inventory.cache[cache_key]
except KeyError:
pass
if not source_data:
pwfile = to_bytes(config_data.get('settings_password_file'))
running = config_data.get('running_only', False)
# start getting data
cmd = [self.VBOX, 'list', '-l']
if running:
cmd.append('runningvms')
else:
cmd.append('vms')
if pwfile and os.path.exists(pwfile):
cmd.append('--settingspwfile')
cmd.append(pwfile)
try:
p = Popen(cmd, stdout=PIPE)
except Exception as e:
                raise AnsibleParserError(e)
source_data = p.stdout.readlines()
inventory.cache[cache_key] = source_data
self._populate_from_source(source_data, config_data)
|
gpl-3.0
|
wdv4758h/ZipPy
|
lib-python/3/formatter.py
|
751
|
14930
|
"""Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if implementing a writer
interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
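    # For example (bijective base-26; illustrative values):
    #   format_letter('a', 1)  -> 'a'
    #   format_letter('a', 26) -> 'z'
    #   format_letter('a', 28) -> 'ab'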
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
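    # For example: format_roman('i', 4) -> 'iv' and
    # format_roman('I', 1994) -> 'MCMXCIV'; a counter of 4000 raises the
    # IndexError noted above.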
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, font):
size, i, b, tt = font
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
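    # Fonts are (size, italic, bold, teletype) tuples and AS_IS keeps the
    # current value, so e.g. push_font((AS_IS, 1, AS_IS, AS_IS)) switches
    # to italics without touching the other three properties.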
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print("new_alignment(%r)" % (align,))
def new_font(self, font):
print("new_font(%r)" % (font,))
def new_margin(self, margin, level):
print("new_margin(%r, %d)" % (margin, level))
def new_spacing(self, spacing):
print("new_spacing(%r)" % (spacing,))
def new_styles(self, styles):
print("new_styles(%r)" % (styles,))
def send_paragraph(self, blankline):
print("send_paragraph(%r)" % (blankline,))
def send_line_break(self):
print("send_line_break()")
def send_hor_rule(self, *args, **kw):
print("send_hor_rule()")
def send_label_data(self, data):
print("send_label_data(%r)" % (data,))
def send_flowing_data(self, data):
print("send_flowing_data(%r)" % (data,))
def send_literal_data(self, data):
print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
|
bsd-3-clause
|
isabernardes/Heriga
|
Herigaenv/lib/python2.7/site-packages/PIL/PcfFontFile.py
|
19
|
6136
|
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id$
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from . import Image, FontFile
from ._binary import i8, i16le as l16, i32le as l32, i16be as b16, i32be as b32
# --------------------------------------------------------------------
# declarations
PCF_MAGIC = 0x70636601 # "\x01fcp"
PCF_PROPERTIES = (1 << 0)
PCF_ACCELERATORS = (1 << 1)
PCF_METRICS = (1 << 2)
PCF_BITMAPS = (1 << 3)
PCF_INK_METRICS = (1 << 4)
PCF_BDF_ENCODINGS = (1 << 5)
PCF_SWIDTHS = (1 << 6)
PCF_GLYPH_NAMES = (1 << 7)
PCF_BDF_ACCELERATORS = (1 << 8)
BYTES_PER_ROW = [
lambda bits: ((bits+7) >> 3),
lambda bits: ((bits+15) >> 3) & ~1,
lambda bits: ((bits+31) >> 3) & ~3,
lambda bits: ((bits+63) >> 3) & ~7,
]
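# For example, a 10-pixel-wide row padded to 16-bit boundaries (pad index 1)
# occupies BYTES_PER_ROW[1](10) == ((10 + 15) >> 3) & ~1 == 2 bytes.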
def sz(s, o):
return s[o:s.index(b"\0", o)]
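# sz() extracts the NUL-terminated byte string starting at offset o,
# e.g. sz(b"foo\0bar\0", 4) == b"bar".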
##
# Font file plugin for the X11 PCF format.
class PcfFontFile(FontFile.FontFile):
name = "name"
def __init__(self, fp):
magic = l32(fp.read(4))
if magic != PCF_MAGIC:
raise SyntaxError("not a PCF file")
FontFile.FontFile.__init__(self)
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
#
# create glyph structure
for ch in range(256):
ix = encoding[ch]
if ix is not None:
x, y, l, r, w, a, d, f = metrics[ix]
glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix]
self.glyph[ch] = glyph
def _getformat(self, tag):
format, size, offset = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if format & 4:
i16, i32 = b16, b32
else:
i16, i32 = l16, l32
return fp, format, i16, i32
def _load_properties(self):
#
# font properties
properties = {}
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
# read property description
p = []
for i in range(nprops):
p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))))
if nprops & 3:
fp.seek(4 - (nprops & 3), 1) # pad
data = fp.read(i32(fp.read(4)))
for k, s, v in p:
k = sz(data, k)
if s:
v = sz(data, v)
properties[k] = v
return properties
def _load_metrics(self):
#
# font metrics
metrics = []
fp, format, i16, i32 = self._getformat(PCF_METRICS)
append = metrics.append
if (format & 0xff00) == 0x100:
# "compressed" metrics
for i in range(i16(fp.read(2))):
left = i8(fp.read(1)) - 128
right = i8(fp.read(1)) - 128
width = i8(fp.read(1)) - 128
ascent = i8(fp.read(1)) - 128
descent = i8(fp.read(1)) - 128
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, 0)
)
else:
# "jumbo" metrics
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, attributes)
)
return metrics
def _load_bitmaps(self, metrics):
#
# bitmap data
bitmaps = []
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if nbitmaps != len(metrics):
raise IOError("Wrong number of bitmaps")
offsets = []
for i in range(nbitmaps):
offsets.append(i32(fp.read(4)))
bitmapSizes = []
for i in range(4):
bitmapSizes.append(i32(fp.read(4)))
# byteorder = format & 4 # non-zero => MSB
bitorder = format & 8 # non-zero => MSB
padindex = format & 3
bitmapsize = bitmapSizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = "1;R"
if bitorder:
mode = "1"
for i in range(nbitmaps):
x, y, l, r, w, a, d, f = metrics[i]
b, e = offsets[i], offsets[i+1]
bitmaps.append(
Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))
)
return bitmaps
def _load_encoding(self):
# map character code to bitmap index
encoding = [None] * 256
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
default = i16(fp.read(2))
nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
for i in range(nencoding):
encodingOffset = i16(fp.read(2))
if encodingOffset != 0xFFFF:
try:
encoding[i+firstCol] = encodingOffset
except IndexError:
break # only load ISO-8859-1 glyphs
return encoding
|
mit
|
joshisa/taiga-back
|
taiga/projects/wiki/models.py
|
21
|
3733
|
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.contenttypes import generic
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from taiga.projects.notifications.mixins import WatchedModelMixin
from taiga.projects.occ import OCCModelMixin
class WikiPage(OCCModelMixin, WatchedModelMixin, models.Model):
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="wiki_pages", verbose_name=_("project"))
slug = models.SlugField(max_length=500, db_index=True, null=False, blank=False,
verbose_name=_("slug"))
content = models.TextField(null=False, blank=True,
verbose_name=_("content"))
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="owned_wiki_pages", verbose_name=_("owner"))
last_modifier = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="last_modified_wiki_pages", verbose_name=_("last modifier"))
created_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("created date"),
default=timezone.now)
modified_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("modified date"))
attachments = generic.GenericRelation("attachments.Attachment")
_importing = None
class Meta:
verbose_name = "wiki page"
verbose_name_plural = "wiki pages"
ordering = ["project", "slug"]
unique_together = ("project", "slug",)
permissions = (
("view_wikipage", "Can view wiki page"),
)
def __str__(self):
return "project {0} - {1}".format(self.project_id, self.slug)
def save(self, *args, **kwargs):
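        # Stamp modified_date on every normal save; when importing
        # (_importing is truthy) an already-supplied timestamp is kept.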
if not self._importing or not self.modified_date:
self.modified_date = timezone.now()
return super().save(*args, **kwargs)
class WikiLink(models.Model):
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="wiki_links", verbose_name=_("project"))
title = models.CharField(max_length=500, null=False, blank=False)
href = models.SlugField(max_length=500, db_index=True, null=False, blank=False,
verbose_name=_("href"))
order = models.PositiveSmallIntegerField(default=1, null=False, blank=False,
verbose_name=_("order"))
class Meta:
verbose_name = "wiki link"
verbose_name_plural = "wiki links"
ordering = ["project", "order"]
unique_together = ("project", "href")
def __str__(self):
return self.title
|
agpl-3.0
|
codebauss/don
|
openstack_dashboard/don/ovs/collector.py
|
1
|
35342
|
#
# This file runs a whole bunch of commands (on the shell), parses their outputs
# and constructs a dictionary of extracted information.
#
import pprint
import re
import argparse
import os
import sys
from common import settings, debug, error, status_update, dump_json, load_json
from common import execute_cmd, connect_to_box, get_vm_credentials
import ConfigParser
# from analyzer import analyze
don_config = ConfigParser.ConfigParser()
try:
don_config.read('/etc/don/don.conf')
except Exception, e:
print e
deployment_type = don_config.get('DEFAULT','deployment_type')
def get_env(filename):
try:
lines=open(os.getcwd()+os.sep+filename,'r').read().splitlines()
except IOError,e:
print "%s :%s"%(e.args[1],filename)
raise
env = {}
for line in lines:
if line.startswith('export'):
m = re.search(r'export (.+)=(.+)', line)
if m:
key = m.group(1).replace('"','')
val = m.group(2).replace('"','')
env.update({key:val})
return env
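# For example, a line such as:  export OS_USERNAME="admin"
# produces the entry {'OS_USERNAME': 'admin'} (quotes stripped).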
myenv = os.environ.copy()
myenv.update(get_env('admin-openrc.sh'))
# Contains all info gathered by parsing the output of commands
info = {
'vms' : {},
'brctl' : {},
'bridges' : {
'br-ex' : {'ports': {}},
'br-int' : {'ports': {}},
'br-tun' : {'ports': {}}
},
'floating_ips' : {},
}
def add_new_command (cmd_dict, cmd_key, cmd):
if cmd_dict.has_key(cmd_key):
error(cmd_key + ' already exists in command dictionary')
return
cmd_dict[cmd_key] = cmd
def record_linuxbridge (bridge, interface_list):
brctl_dict = info['brctl']
if brctl_dict.has_key(bridge):
error('Bridge ' + bridge + ' repeated! Overwriting!')
brctl_dict[bridge] = {'interfaces' : interface_list}
def get_bridge_entry (br):
    bridge_dict = info['bridges']
    if not bridge_dict.has_key(br):
error('Bridge ' + br + ' does not exist! Supported bridges: ' + str(bridge_dict.keys()))
return None
return bridge_dict.get(br)
#
# Parser functions (for each command). Each function has the sample input as a comment above it.
#
'''
<uuid>31b1cfcc-ca85-48a9-a84a-8b222d377080</uuid>
<nova:name>VM1</nova:name>
<source bridge='qbrb0f5cfc8-4d'/>
<uuid>f9743f1c-caeb-4892-af83-9dc0ac757545</uuid>
<nova:name>VM2</nova:name>
<source bridge='qbr6ce314cb-a5'/>
'''
def cat_instance_parser (parse_this):
vm_dict = info['vms']
uuid = None
name = None
src_bridge = None
for line in parse_this:
m = re.search('<uuid>(\S+)</uuid>', line)
if m:
uuid = m.group(1)
continue
m = re.search('<nova:name>(\S+)</nova:name>', line)
if m:
name = m.group(1)
continue
m = re.search('<source bridge=\'(\S+)\'/>', line)
if m:
src_bridge = m.group(1)
if not vm_dict.has_key(name):
vm_dict[name] = {}
vm_entry = vm_dict[name]
vm_entry['uuid'] = uuid
if not vm_entry.has_key('src_bridge'):
vm_entry['src_bridge'] = []
vm_entry['tap_dev'] = []
vm_entry['src_bridge'].append(src_bridge)
vm_entry['tap_dev'].append(src_bridge.replace('qbr', 'tap'))
'''
bridge name bridge id STP enabled interfaces
qbr6ce314cb-a5 8000.9255d5550cf8 no qvb6ce314cb-a5
tap6ce314cb-a5
qbrb0f5cfc8-4d 8000.b2277f2c981b no qvbb0f5cfc8-4d
tapb0f5cfc8-4d
virbr0 8000.000000000000 yes
'''
def brctl_show_parser (parse_this):
interfaces = []
bridge = None
for line in parse_this:
m = re.search('(qbr\S+)\s+\S+\s+\S+\s+(\S+)', line)
if m:
            # We already have a bridge, that means we are now looking at the next bridge
if bridge:
record_linuxbridge(bridge, interfaces)
interfaces = []
bridge = m.group(1)
interfaces.append(m.group(2))
continue
m = re.search('^\s+(\S+)', line)
if m:
interfaces.append(m.group(1))
# handle the last bridge
if bridge:
record_linuxbridge(bridge, interfaces)
'''
ubuntu@ubuntu-VirtualBox:~/don$ sudo ovs-vsctl show
0fc4d93f-28e0-408a-8edb-21d5ec76b2c3
Bridge br-tun
fail_mode: secure
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
Port br-tun
Interface br-tun
type: internal
Bridge br-int
fail_mode: secure
Port "tap3b74b285-71"
tag: 2
Interface "tap3b74b285-71"
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port "qvob0f5cfc8-4d"
tag: 2
Interface "qvob0f5cfc8-4d"
Port "qr-77ce7d4c-d5"
tag: 1
Interface "qr-77ce7d4c-d5"
type: internal
Port "qr-56cf8a2d-27"
tag: 2
Interface "qr-56cf8a2d-27"
type: internal
Port "qvo6ce314cb-a5"
tag: 2
Interface "qvo6ce314cb-a5"
Port br-int
Interface br-int
type: internal
Port "tap9d44135a-45"
tag: 1
Interface "tap9d44135a-45"
type: internal
Bridge br-ex
Port "qg-2909632b-b8"
Interface "qg-2909632b-b8"
type: internal
Port br-ex
Interface br-ex
type: internal
Port "qg-e2fb759b-60"
Interface "qg-e2fb759b-60"
type: internal
ovs_version: "2.0.2"
'''
def ovs_vsctl_show_parser (parse_this):
bridge = None
bridge_dict = info['bridges']
for line in parse_this:
m = re.search('Bridge\s+(br-\S+)', line)
if m:
bridge = str(m.group(1))
if not bridge_dict.has_key(bridge):
error('Skipping bridge [' + bridge + ']! Supported bridges: ' + str(bridge_dict.keys()))
bridge = None
continue
bridge_entry = bridge_dict.get(bridge)
if bridge:
m = re.search('fail_mode: (\S+)', line)
if m:
bridge_entry['fail_mode'] = m.group(1)
continue
m = re.search('Port (\S+)', line)
if m:
# the port names seem to have double quotes around them!
port = m.group(1).replace('"','')
if not bridge_entry['ports'].has_key(port):
bridge_entry['ports'][port] = {}
port_entry = bridge_entry['ports'][port]
continue
m = re.search('tag: (\d+)', line)
if m:
port_entry['tag'] = m.group(1)
continue
m = re.search('Interface (\S+)', line)
if m:
# the interface names seem to have double quotes around them!
interface = m.group(1).replace('"', '')
if not port_entry.has_key('interfaces'):
port_entry['interfaces'] = {}
port_entry['interfaces'][interface] = {}
interface_entry = port_entry['interfaces'][interface]
continue
m = re.search('type: (\S+)', line)
if m:
interface_entry['type'] = m.group(1)
continue
m = re.search('options: {(\S+)}', line)
if m:
options = m.group(1)
interface_entry['options'] = options
continue
'''
OFPT_FEATURES_REPLY (xid=0x2): dpid:00008207ee8eee4d
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: OUTPUT SET_VLAN_VID SET_VLAN_PCP STRIP_VLAN SET_DL_SRC SET_DL_DST SET_NW_SRC SET_NW_DST SET_NW_TOS SET_TP_SRC SET_TP_DST ENQUEUE
4(patch-tun): addr:e2:ce:31:60:94:e0
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
5(tap9d44135a-45): addr:00:00:00:00:00:00
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
6(qr-77ce7d4c-d5): addr:00:00:00:00:00:00
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
7(tap3b74b285-71): addr:00:00:00:00:00:00
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
8(qr-56cf8a2d-27): addr:00:00:00:00:00:00
config: PORT_DOWN
state: LINK_DOWN
speed: 0 Mbps now, 0 Mbps max
9(qvob0f5cfc8-4d): addr:7a:82:4f:4e:a0:ab
config: 0
state: 0
current: 10GB-FD COPPER
speed: 10000 Mbps now, 0 Mbps max
10(qvo6ce314cb-a5): addr:42:92:2a:95:28:ed
config: 0
state: 0
current: 10GB-FD COPPER
speed: 10000 Mbps now, 0 Mbps max
LOCAL(br-int): addr:82:07:ee:8e:ee:4d
config: 0
state: 0
speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0
'''
def ovs_ofctl_show_br_parser (bridge, parse_this):
bridge_dict = info['bridges']
if not bridge_dict.has_key(bridge):
error('Skipping bridge [' + bridge + ']! Supported bridges: ' + str(bridge_dict.keys()))
return
bridge_entry = bridge_dict.get(bridge)
pprint.pprint(bridge_entry)
for line in parse_this:
m = re.search('(\d+)\((\S+)\):\s+addr:(\S+)', line)
if m:
port_id = m.group(1)
port = m.group(2)
port_mac = m.group(3)
if not bridge_entry['ports'].has_key(port):
bridge_entry['ports'][port] = {}
port_entry = bridge_entry['ports'][port]
port_entry['id'] = port_id
port_entry['mac'] = port_mac
continue
m = re.search('(\w+)\((\S+)\):\s+addr:(\S+)', line)
if m:
port_id = m.group(1)
port = m.group(2)
port_mac = m.group(3)
if not bridge_entry['ports'].has_key(port):
bridge_entry['ports'][port] = {}
port_entry = bridge_entry['ports'][port]
port_entry['id'] = port_id
port_entry['mac'] = port_mac
pass
# These three are all wrappers for each of the three bridges
def ovs_ofctl_show_br_int_parser (parse_this):
ovs_ofctl_show_br_parser('br-int', parse_this)
def ovs_ofctl_show_br_ex_parser (parse_this):
ovs_ofctl_show_br_parser('br-ex', parse_this)
def ovs_ofctl_show_br_tun_parser (parse_this):
ovs_ofctl_show_br_parser('br-tun', parse_this)
'''
+--------------------------------------+-------+--------+------------+-------------+--------------------------------------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+-------+--------+------------+-------------+--------------------------------------------------------+
| 31b1cfcc-ca85-48a9-a84a-8b222d377080 | VM1 | ACTIVE | - | Running | private=10.0.2.3 |
| f9743f1c-caeb-4892-af83-9dc0ac757545 | VM2 | ACTIVE | - | Running | private=10.0.2.4 |
| 83b547b9-9578-4840-997a-5aa1c4e829b0 | VM3-1 | ACTIVE | - | Running | private2=10.0.3.3 |
| 17b4685e-5cbe-4dd1-862a-6f89c191e1e7 | VM3-2 | ACTIVE | - | Running | private2=10.0.3.4 |
| ee4952a3-0700-42ea-aab3-7503bc9d87e2 | VM4 | ACTIVE | - | Running | private2=10.0.3.5; public=172.24.4.4; private=10.0.2.5 |
+--------------------------------------+-------+--------+------------+-------------+--------------------------------------------------------+
'''
def nova_list_parser (parse_this):
vm_dict = info['vms']
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('Networks', line):
continue
parts = line.split('|')
parts = [x.strip() for x in parts]
vm = parts[2]
networks = parts[6].split(';')
networks = [x.strip() for x in networks]
if not vm_dict.has_key(vm):
vm_dict[vm] = {'interfaces' : {}}
for entry in networks:
# excluding ipv6 ip
if len(entry.split(',')) > 1:
                network = entry.split('=')[0]
                ip = filter(lambda a: re.search("(\d+\.\d+\.\d+\.\d+)", a) is not None,
                            entry.split('=')[1].split(','))[0].strip()
else:
(network, ip) = entry.split(',')[0].split('=')
vm_dict[vm]['interfaces'][ip] = {'network': network}
pass
'''
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+
| id | name | mac_address | fixed_ips |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+
| 1dd820b1-98bd-4f39-b1ab-e89ecc67ae43 | | fa:16:3e:0f:36:26 | {"subnet_id": "75ae4ce8-495d-4f53-93d1-bf98e55d6658", "ip_address": "172.24.4.4"} |
| 1f73af79-fa69-4433-bcab-16d7a0bc2607 | | fa:16:3e:dc:c8:de | {"subnet_id": "dbc9717f-5a08-48bb-92e2-ed2da443541b", "ip_address": "10.0.3.1"} |
| 2909632b-b8a3-436b-aabd-9868d0c1051e | | fa:16:3e:af:95:a9 | {"subnet_id": "75ae4ce8-495d-4f53-93d1-bf98e55d6658", "ip_address": "172.24.4.2"} |
| 3b74b285-71d0-4311-8a69-2b032eebbe13 | | fa:16:3e:70:09:45 | {"subnet_id": "1083b740-45ce-49be-b603-73cbc26af5d7", "ip_address": "10.0.2.2"} |
| 56cf8a2d-27b7-4eab-a334-349c70520868 | | fa:16:3e:8a:ce:cb | {"subnet_id": "1083b740-45ce-49be-b603-73cbc26af5d7", "ip_address": "10.0.2.1"} |
| 6ce314cb-a599-4af8-8187-bdb0bfa88809 | | fa:16:3e:83:b1:60 | {"subnet_id": "1083b740-45ce-49be-b603-73cbc26af5d7", "ip_address": "10.0.2.4"} |
| 77ce7d4c-d5b9-4669-b23c-b0d9ee5f58c8 | | fa:16:3e:a6:de:15 | {"subnet_id": "531f1674-2b46-4ad7-9d73-4c41d215cc99", "ip_address": "10.0.0.1"} |
| 9c34adc0-c655-4b00-89ba-ca65def56fe0 | | fa:16:3e:a1:e7:f5 | {"subnet_id": "dbc9717f-5a08-48bb-92e2-ed2da443541b", "ip_address": "10.0.3.4"} |
| 9d44135a-4551-4448-9c80-d211b023c3eb | | fa:16:3e:80:83:c9 | {"subnet_id": "531f1674-2b46-4ad7-9d73-4c41d215cc99", "ip_address": "10.0.0.2"} |
| b0f5cfc8-4da0-42ad-8c18-6f29870bfb2a | | fa:16:3e:ae:a2:17 | {"subnet_id": "1083b740-45ce-49be-b603-73cbc26af5d7", "ip_address": "10.0.2.3"} |
| c03437a8-8a44-4615-b160-e1ef227d63c5 | | fa:16:3e:7f:b6:a5 | {"subnet_id": "dbc9717f-5a08-48bb-92e2-ed2da443541b", "ip_address": "10.0.3.5"} |
| cb7d8a29-8140-4ed0-a1c7-03cbf0be0c5b | | fa:16:3e:33:ee:b1 | {"subnet_id": "1083b740-45ce-49be-b603-73cbc26af5d7", "ip_address": "10.0.2.5"} |
| e2fb759b-602a-4fcd-8674-e8f5fe297dbc | | fa:16:3e:ea:47:b5 | {"subnet_id": "75ae4ce8-495d-4f53-93d1-bf98e55d6658", "ip_address": "172.24.4.3"} |
| e4f25d71-5684-4ccc-8114-2465a84ecc58 | | fa:16:3e:90:c7:d3 | {"subnet_id": "dbc9717f-5a08-48bb-92e2-ed2da443541b", "ip_address": "10.0.3.2"} |
| f57aa80e-2ef3-4031-a0a4-bc12d2445687 | | fa:16:3e:2e:6e:91 | {"subnet_id": "dbc9717f-5a08-48bb-92e2-ed2da443541b", "ip_address": "10.0.3.3"} |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+
'''
def neutron_port_list_parser (parse_this):
tap_to_ip = {}
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('fixed_ips', line):
continue
parts = line.split('|')
parts = [x.strip() for x in parts]
tap = parts[1][:11]
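        # tap device names embed only the first 11 characters of the port
        # UUID (e.g. port 3b74b285-71d0-... maps to tap3b74b285-71), so the
        # truncated id is used as the lookup key.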
#ip = parts[4].split(':')[-1].replace('}', '')
m = re.search('"ip_address": "(\S+)"', parts[4])
if m:
ip = m.group(1)
tap_to_ip[tap] = ip
info['tap_to_ip'] = tap_to_ip
pass
'''
+--------------------------------------+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------+-------+
| id | name | external_gateway_info | distributed | ha |
+--------------------------------------+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------+-------+
| 8c981cdb-c19f-47c1-8149-f85a506c486c | router1 | {"network_id": "640ece56-c6dc-4868-8e7a-12547508098a", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "75ae4ce8-495d-4f53-93d1-bf98e55d6658", "ip_address": "172.24.4.2"}]} | False | False |
| ac41aab2-f9c3-4a06-8eef-f909ee1e6e50 | router | {"network_id": "640ece56-c6dc-4868-8e7a-12547508098a", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "75ae4ce8-495d-4f53-93d1-bf98e55d6658", "ip_address": "172.24.4.3"}]} | False | False |
+--------------------------------------+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------+-------+
'''
def neutron_router_list_parser (parse_this):
routers = {}
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('external_gateway_info', line):
continue
parts = line.split('|')
parts = [x.strip() for x in parts]
router_id = parts[1]
name = parts[2]
network_id = 'unknown'
m = re.search('"network_id":\s+"(\S+)"', parts[3])
if m:
network_id = m.group(1)
ip_address = 'x.x.x.x'
m = re.search('"ip_address":\s+"(\d+\.\d+\.\d+\.\d+)"', parts[3])
if m:
ip_address = m.group(1)
routers[name] = {'id' : router_id,
'ip_address' : ip_address,
'network_id' : network_id,
}
info['routers'] = routers
# now add some more commands to get further information for
# l3-agents which run in different namespaces
for router in info['routers'].keys():
uuid = info['routers'][router]['id']
namespace = 'qrouter-' + uuid
cmd_key = 'netns_' + namespace
cmd = {
'cmd' : 'echo namespace: ' + namespace + '; echo "sudo ip netns exec ' + namespace + ' ip a" > /tmp/don.bash; bash /tmp/don.bash',
'help' : 'Collect namespace info for l3-agent',
'shell' : True,
'output': None,
'order' : 100,
'parser': ip_namespace_qrouter_parser,
}
add_new_command (commands, cmd_key, cmd)
pass
def ip_namespace_qrouter_parser (parse_this):
nm_dict = info['namespaces']
qr_intf = None
qg_intf = None
ip = None
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line):
continue
m = re.search('^namespace: (\S+)', line)
if m:
namespace = m.group(1)
continue
m = re.search('^\d+: (qr-\S+):', line)
if m:
qr_intf = m.group(1)
continue
m = re.search('^\d+: (qg-\S+):', line)
if m:
qg_intf = m.group(1)
continue
m = re.search('inet (\d+\.\d+\.\d+\.\d+/\d+)', line)
if m:
ip = m.group(1)
if not nm_dict[namespace].has_key('interfaces'):
nm_dict[namespace] = {'interfaces': {}}
if qg_intf:
nm_dict[namespace]['interfaces'][qg_intf] = ip
elif qr_intf:
nm_dict[namespace]['interfaces'][qr_intf] = ip
else:
continue
qr_intf = None
qg_intf = None
ip = None
pass
def ip_namespace_qdhcp_parser (parse_this):
nm_dict = info['namespaces']
tap_intf = None
ip = None
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line):
continue
m = re.search('^namespace: (\S+)', line)
if m:
namespace = m.group(1)
continue
m = re.search('^\d+: (tap\S+):', line)
if m:
tap_intf = m.group(1)
m = re.search('inet (\d+\.\d+\.\d+\.\d+/\d+)', line)
if m:
ip = m.group(1)
if not nm_dict[namespace].has_key('interfaces'):
nm_dict[namespace] = {'interfaces': {}}
if tap_intf:
nm_dict[namespace]['interfaces'][tap_intf] = ip
tap_intf = None
ip = None
pass
'''
+--------------------------------------+----------+----------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+----------+----------------------------------------------------------+
| 0a355cf0-00d0-45e1-9a3a-9aca436510d5 | private2 | 8393a2da-09dd-46e8-a26f-caf9f12c48f5 10.0.3.0/24 |
| 3b4ddfcb-49b8-46ae-9ecd-cb4f9b1830fc | public | 2dd78cb6-eb90-44ea-82b0-bbdb7316edb2 172.24.4.0/24 |
| | | 304ce342-18fe-4b4a-aa49-f5c7e5e31b2a 2001:db8::/64 |
| 4b7a42e8-cc16-411c-b932-989106c2f934 | private1 | cc580da4-0b61-4982-ae7b-d2d5c441b1d7 10.0.2.0/24 |
| bfedebe8-c436-4056-8d12-1d2f7e62e8ec | private | 4deed2ad-e184-43a9-8cc7-4493aa07f78f fdfd:57f1:b2ba::/64 |
| | | 8e2c5cfd-fbc1-4fe0-9f5e-f0b0dc070fb8 10.0.0.0/24 |
+--------------------------------------+----------+----------------------------------------------------------+
'''
def neutron_net_list_parser (parse_this):
networks = {}
ip = 'unknown'
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('subnets', line):
continue
# Skip IPv6 for the time being
m = re.search('^\| (\S+) \| (\S+)\s+\| \S+ (\S+)', line)
if m:
network_id = m.group(1)
name = m.group(2)
possible_ip = m.group(3)
if re.search('\.', possible_ip):
ip = possible_ip
networks[network_id] = {'name' : name,
'ip' : ip
}
m = re.search('^\|\s+\|\s+\| \S+ (\S+)', line)
if m:
possible_ip = m.group(1)
if re.search('\.', possible_ip):
ip = possible_ip
networks[network_id] = {'name' : name,
'ip' : ip
}
        ip = 'unknown'
info['networks'] = networks
# now add some more commands to get further information for
# dhcp agents which run in different namespaces
for network_id in networks.keys():
# There is no dhcp agent run for public network
if networks[network_id]['name'] == 'public':
continue
namespace = 'qdhcp-' + network_id
cmd_key = 'netns_' + namespace
cmd = {
'cmd' : 'echo namespace: ' + namespace + '; echo "sudo ip netns exec ' + namespace + ' ip a" > /tmp/don.bash; bash /tmp/don.bash',
'help' : 'Collect namespace info for dhcp-agent',
'shell' : True,
'output': None,
'order' : 110,
'parser': ip_namespace_qdhcp_parser,
}
add_new_command (commands, cmd_key, cmd)
pass
'''
qdhcp-d5357ad8-df8b-4f19-8433-9db13304e4b2
qrouter-ac41aab2-f9c3-4a06-8eef-f909ee1e6e50
qdhcp-49be53de-33ed-480a-a06e-6e77c8f887dc
qrouter-8c981cdb-c19f-47c1-8149-f85a506c486c
qdhcp-82b0e328-4530-495e-a43f-238ef7a53d62
'''
def ip_netns_parser (parse_this):
namespaces = {}
for line in parse_this:
if re.search('^q', line):
namespaces[line] = {}
info['namespaces'] = namespaces
def dummy_parser (parse_this):
debug('Dummy Parser :-)')
pass
def floating_ip_list_parser(parse_this):
floating_ips = {}
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('Pool', line):
continue
parts = line.split('|')
parts = [x.strip() for x in parts]
floating_ip = parts[2]
vm_id = parts[3]
pool = parts[5]
# ignore floating ips which is not assigned to any vms
if vm_id != '-':
floating_ips.update({vm_id:{'floating_ip':floating_ip,'pool':pool}})
info['floating_ips'] = floating_ips
# static commands whose output have info that help us diagnose
commands = {
'nova_list':
{
'cmd' : ['nova', 'list'],
'help' : 'Collect list of VMs from nova',
'env' : True,
'output': None,
'order' : 1,
'parser': nova_list_parser,
},
'cat_instance':
{
'cmd' : 'cat /etc/libvirt/qemu/instance-*.xml | egrep -e "<uuid>" -e "nova:name" -e "source bridge"',
'help' : 'Collect some info from the launched VMs',
'sudo' : True,
'shell' : True,
'output': None,
'order' : 2,
'parser': cat_instance_parser,
},
'neutron_port_list':
{
'cmd' : ['neutron', 'port-list'],
'help' : 'Collect neutron configured ports',
'env' : True,
'output': None,
'order' : 3,
'parser': neutron_port_list_parser,
},
'neutron_router_list':
{
'cmd' : ['neutron', 'router-list'],
'help' : 'Collect neutron configured routers',
'env' : True,
'output': None,
'order' : 4,
'parser': neutron_router_list_parser,
},
'neutron_net_list':
{
'cmd' : ['neutron', 'net-list'],
'help' : 'Collect neutron configured networks',
'env' : True,
'output': None,
'order' : 5,
'parser': neutron_net_list_parser,
},
'ip_netns':
{
'cmd' : ['ip', 'netns'],
'help' : 'Collect network namespaces',
'output': None,
'order' : 6,
'parser': ip_netns_parser,
},
'brctl_show':
{
'cmd' : ['brctl', 'show'],
'help' : 'Collect information about bridges (linuxbridge) configured',
'output': None,
'order' : 10,
'parser': brctl_show_parser,
},
'ovs_appctl_fdb_show_br_ex':
{
'cmd' : ['ovs-appctl', 'fdb/show', 'br-ex'],
'help' : 'Collect mac data base for bridge br-ex',
'sudo' : True,
'output': None,
'order' : 20,
'parser': None,
},
'ovs_appctl_fdb_show_br_int':
{
'cmd' : ['ovs-appctl', 'fdb/show', 'br-int'],
'help' : 'Collect mac data base for ovs bridge br-int',
'sudo' : True,
'output': None,
'order' : 21,
'parser': None,
},
'ovs_appctl_fdb_show_br_tun':
{
'cmd' : ['ovs-appctl', 'fdb/show', 'br-tun'],
'help' : 'Collect mac data base for ovs bridge br-tun',
'sudo' : True,
'output': None,
'order' : 22,
'parser': None,
},
'ovs_vsctl_show':
{
'cmd' : ['ovs-vsctl', 'show'],
'help' : 'Collect ovs bridge info',
'sudo' : True,
'output': None,
'order' : 30,
'parser': ovs_vsctl_show_parser,
},
'ovs_ofctl_show_br_ex':
{
'cmd' : ['ovs-ofctl', 'show', 'br-ex'],
'help' : 'Collect openflow information for ovs bridge br-ex',
'sudo' : True,
'output': None,
'order' : 40,
'parser': ovs_ofctl_show_br_ex_parser,
},
'ovs_ofctl_show_br_int':
{
'cmd' : ['ovs-ofctl', 'show', 'br-int'],
'help' : 'Collect openflow information for ovs bridge br-int',
'sudo' : True,
'output': None,
'order' : 41,
'parser': ovs_ofctl_show_br_int_parser,
},
'ovs_ofctl_show_br_tun':
{
'cmd' : ['ovs-ofctl', 'show', 'br-tun'],
'help' : 'Collect openflow information for ovs bridge br-tun',
'sudo' : True,
'output': None,
'order' : 42,
'parser': ovs_ofctl_show_br_tun_parser,
},
'ovs_ofctl_dump_flows_br_ex':
{
'cmd' : ['ovs-ofctl', 'dump-flows', 'br-ex'],
'help' : 'Collect openflow flow table information for ovs bridge br-ex',
'sudo' : True,
'output': None,
'order' : 50,
'parser': None,
},
'ovs_ofctl_dump_flows_br_int':
{
'cmd' : ['ovs-ofctl', 'dump-flows', 'br-int'],
'help' : 'Collect openflow flow table information for ovs bridge br-int',
'sudo' : True,
'output': None,
'order' : 51,
'parser': None,
},
'ovs_ofctl_dump_flows_br_tun':
{
'cmd' : ['ovs-ofctl', 'dump-flows', 'br-tun'],
'help' : 'Collect openflow flow table information for ovs bridge br-tun',
'sudo' : True,
'output': None,
'order' : 52,
'parser': None,
},
'instance_floating_ip_list':
{
'cmd' : ['nova', 'floating-ip-list'],
'help' : 'Collect floating ip information for instances',
'env' : True,
'output': None,
'order' : 53,
'parser': floating_ip_list_parser,
},
}
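# Parsers can register follow-up commands at runtime via add_new_command(),
# as neutron_router_list_parser and neutron_net_list_parser do for the
# per-namespace "ip a" commands. A hypothetical registration looks like:
#
#   add_new_command(commands, 'uname', {
#       'cmd'   : ['uname', '-a'],
#       'help'  : 'Collect kernel information',
#       'output': None,
#       'order' : 99,
#       'parser': dummy_parser,
#   })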
def check_args():
parser = argparse.ArgumentParser(description='Runs commands, collects, and parses output',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--debug', dest='debug', help='Enable debugging',
default=True, action='store_true')
parser.add_argument('--info_file', dest='info_file',
help='Info will be stored in JSON format in this file',
default="don.json", type=str)
args = parser.parse_args()
settings['debug'] = args.debug
settings['info_file'] = args.info_file
def all_commands_executed (commands):
for cmd in commands.keys():
if commands[cmd]['parser']:
done = commands[cmd].get('done', False)
if done == False:
return False
return True
def get_vm_info_from_compute(cmd):
output = execute_cmd(['nova', 'hypervisor-list'], sudo=False, shell=False, env=myenv).split('\n');
compute_list = get_hypervisor(output)
vm_info = []
compute_creds = get_vm_credentials()
for node in compute_list:
creds = compute_creds.get('hypervisor').get(node,compute_creds.get('hypervisor')['default'])
ssh = connect_to_box(node,creds['username'],creds['password'])
(stdin,out,err) = ssh.exec_command('sudo ' + cmd)
vm_info.extend(out.read().splitlines())
ssh.close()
return vm_info
def exec_on_remote(cmd):
node_details = get_vm_credentials()
creds = node_details.get('network')
# print "sudo "+cmd
ssh = connect_to_box(creds['hostname'],creds['username'],creds['password'])
(stdin,out,err) = ssh.exec_command(cmd)
if len(err.read()):
return []
return out.read().splitlines()
def get_hypervisor(parse_this):
hypervisor = []
for line in parse_this:
if re.search('^\+', line) or re.search('^$', line) or re.search('Hypervisor hostname', line):
continue
parts = line.split('|')
parts = [x.strip() for x in parts]
name = parts[2]
hypervisor.append(name)
return hypervisor
def main():
check_args()
iteration = 0
# Parser of any specific command might add more commands to be executed.
# Hence continue in a loop.
while True:
if (all_commands_executed(commands) or iteration >= 10):
break
iteration += 1
status_update('Iteration: ' + str(iteration))
sorted_keys = sorted(commands.items(), key = lambda (k,v) : v['order'])
for (cmd, dontcare) in sorted_keys:
# Only collect stuff for which we have written a parser
if commands[cmd]['parser']:
if commands[cmd].get('done', False):
continue
if commands[cmd].has_key('help'):
status_update(commands[cmd]['help'])
shell = commands[cmd].get('shell', False)
env = None
if commands[cmd].get('env', False):
env = myenv
sudo = commands[cmd].get('sudo', False)
if deployment_type == 'multinode':
# handling for network node
                    if cmd.startswith('netns_'):
                        commands[cmd]['output'] = exec_on_remote(commands[cmd]['cmd'])
                    elif cmd == 'cat_instance':
                        commands[cmd]['output'] = get_vm_info_from_compute(commands[cmd]['cmd'])
                        print commands[cmd]['output']
                    else:
                        commands[cmd]['output'] = execute_cmd(commands[cmd]['cmd'], sudo=sudo, shell=shell, env=env).split('\n')
                else:
                    commands[cmd]['output'] = execute_cmd(commands[cmd]['cmd'], sudo=sudo, shell=shell, env=env).split('\n')
commands[cmd]['parser'](commands[cmd]['output'])
commands[cmd]['done'] = True
debug('============= COMMANDS =============')
#debug(pprint.pformat(commands))
status_update('Writing collected info into ' + settings['info_file'])
dump_json(info, settings['info_file'])
if __name__ == "__main__":
main()
|
apache-2.0
|
KAMI911/loec
|
examples/Sharpen/binaries-windows-python26/ImageFont.py
|
3
|
17547
|
#
# The Python Imaging Library.
# $Id: ImageFont.py 2813 2006-10-07 10:11:35Z fredrik $
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import os, string, sys
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
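# A minimal sketch of decoding one glyph record from the metrics block
# described above. _unpack_glyph is a hypothetical helper, not used by this
# module; it assumes the ten values per character are stored as big-endian
# signed 16-bit integers, as written by FontFile.py.
def _unpack_glyph(data, ch):
    import struct
    # each record is 10 int16 values: dx, dy, dstbox (4), srcbox (4)
    dx, dy, d0, d1, d2, d3, s0, s1, s2, s3 = struct.unpack(
        ">10h", data[ch * 20:ch * 20 + 20])
    return (dx, dy), (d0, d1, d2, d3), (s0, s1, s2, s3)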
##
# The <b>ImageFont</b> module defines a class with the same name.
# Instances of this class store bitmap fonts, and are used with the
# <b>text</b> method of the <b>ImageDraw</b> class.
# <p>
# PIL uses its own font file format to store bitmap fonts. You can
# use the <b>pilfont</b> utility to convert BDF and PCF font
# descriptors (X window font formats) to this format.
# <p>
# Starting with version 1.1.4, PIL can be configured to support
# TrueType and OpenType fonts. For earlier versions, TrueType
# support is only available as part of the imToolkit package.
#
# @see ImageDraw#ImageDraw.text
# @see pilfont
class ImageFont:
"PIL font wrapper"
def _load_pilfont(self, filename):
file = open(filename, "rb")
for ext in (".png", ".gif", ".pbm"):
try:
fullname = os.path.splitext(filename)[0] + ext
image = Image.open(fullname)
            except IOError:
pass
else:
if image and image.mode in ("1", "L"):
break
else:
raise IOError("cannot find glyph data file")
self.file = fullname
return self._load_pilfont_data(file, image)
def _load_pilfont_data(self, file, image):
# read PILfont header
if file.readline() != "PILfont\n":
raise SyntaxError("Not a PILfont file")
d = string.split(file.readline(), ";")
self.info = [] # FIXME: should be a dictionary
s = file.readline()
        while s and s != "DATA\n":
            self.info.append(s)
            s = file.readline()
# read PILfont metrics
data = file.read(256*20)
# check image
if image.mode not in ("1", "L"):
raise TypeError("invalid font image mode")
image.load()
self.font = Image.core.font(image.im, data)
# delegate critical operations to internal type
self.getsize = self.font.getsize
self.getmask = self.font.getmask
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont:
"FreeType font wrapper (requires _imagingft service)"
def __init__(self, file, size, index=0, encoding=""):
# FIXME: use service provider instead
import _imagingft
self.font = _imagingft.getfont(file, size, index, encoding)
def getname(self):
return self.font.family, self.font.style
def getmetrics(self):
return self.font.ascent, self.font.descent
def getsize(self, text):
return self.font.getsize(text)[0]
def getmask(self, text, mode=""):
return self.getmask2(text, mode)[0]
def getmask2(self, text, mode="", fill=Image.core.fill):
size, offset = self.font.getsize(text)
im = fill("L", size, 0)
self.font.render(text, im.id, mode=="1")
return im, offset
##
# Wrapper that creates a transposed font from any existing font
# object.
#
# @param font A font object.
# @param orientation An optional orientation. If given, this should
# be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
# Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
class TransposedFont:
"Wrapper for writing rotated or mirrored text"
def __init__(self, font, orientation=None):
self.font = font
self.orientation = orientation # any 'transpose' argument, or None
def getsize(self, text):
w, h = self.font.getsize(text)
if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
return h, w
return w, h
def getmask(self, text, mode=""):
im = self.font.getmask(text, mode)
if self.orientation is not None:
return im.transpose(self.orientation)
return im
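# Illustrative usage (hypothetical file name; 'draw' stands for an
# ImageDraw.Draw instance):
#
#   font = ImageFont.load("courB08.pil")
#   vertical = ImageFont.TransposedFont(font, orientation=Image.ROTATE_90)
#   draw.text((10, 10), "sideways", font=vertical)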
##
# Load font file. This function loads a font object from the given
# bitmap font file, and returns the corresponding font object.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
def load(filename):
"Load a font file."
f = ImageFont()
f._load_pilfont(filename)
return f
##
# Load a TrueType or OpenType font file, and create a font object.
# This function loads a font object from the given file, and creates
# a font object for a font of the given size.
# <p>
# This function requires the _imagingft service.
#
# @param filename A truetype font file. Under Windows, if the file
#    is not found under the given name, the loader also looks in the
#    Windows <b>fonts</b> directory.
# @param size The requested size, in points.
# @param index Which font face to load (default is first available face).
# @param encoding Which font encoding to use (default is Unicode). Common
# encodings are "unic" (Unicode), "symb" (Microsoft Symbol), "ADOB"
# (Adobe Standard), "ADBE" (Adobe Expert), and "armn" (Apple Roman).
# See the FreeType documentation for more information.
# @return A font object.
# @exception IOError If the file could not be read.
def truetype(filename, size, index=0, encoding=""):
"Load a truetype font file."
try:
return FreeTypeFont(filename, size, index, encoding)
except IOError:
if sys.platform == "win32":
# check the windows font repository
# NOTE: must use uppercase WINDIR, to work around bugs in
# 1.5.2's os.environ.get()
windir = os.environ.get("WINDIR")
if windir:
filename = os.path.join(windir, "fonts", filename)
return FreeTypeFont(filename, size, index, encoding)
raise
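# Illustrative usage (hypothetical font file; requires the _imagingft
# service to be available):
#
#   font = ImageFont.truetype("arial.ttf", 15)
#   width, height = font.getsize("Hello World")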
##
# Load font file. Same as load, but searches for a bitmap font along
# the Python path.
#
# @param filename Name of font file.
# @return A font object.
# @exception IOError If the file could not be read.
# @see #load
def load_path(filename):
"Load a font file, searching along the Python path."
for dir in sys.path:
if Image.isDirectory(dir):
try:
return load(os.path.join(dir, filename))
except IOError:
pass
raise IOError("cannot find font file")
##
# Load a (probably rather ugly) default font.
#
# @return A font object.
def load_default():
"Load a default font."
from StringIO import StringIO
import base64
f = ImageFont()
f._load_pilfont_data(
# courB08
StringIO(base64.decodestring('''
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
''')), Image.open(StringIO(base64.decodestring('''
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
'''))))
return f
if __name__ == "__main__":
# create font data chunk for embedding
import base64, os, sys
font = "../Images/courB08"
print " f._load_pilfont_data("
print " # %s" % os.path.basename(font)
print " StringIO(base64.decodestring('''"
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print "''')), Image.open(StringIO(base64.decodestring('''"
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print "'''))))"
|
gpl-3.0
|
proxysh/Safejumper-for-Mac
|
buildmac/Resources/env/lib/python2.7/site-packages/twisted/logger/test/test_filter.py
|
24
|
12010
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._filter}.
"""
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from .._levels import InvalidLogLevelError
from .._levels import LogLevel
from .._observer import ILogObserver
from .._observer import LogPublisher
from .._filter import FilteringLogObserver
from .._filter import PredicateResult
from .._filter import LogLevelFilterPredicate
class FilteringLogObserverTests(unittest.TestCase):
"""
Tests for L{FilteringLogObserver}.
"""
def test_interface(self):
"""
L{FilteringLogObserver} is an L{ILogObserver}.
"""
observer = FilteringLogObserver(lambda e: None, ())
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def filterWith(self, filters, other=False):
"""
Apply a set of pre-defined filters on a known set of events and return
the filtered list of event numbers.
The pre-defined events are four events with a C{count} attribute set to
C{0}, C{1}, C{2}, and C{3}.
@param filters: names of the filters to apply.
Options are:
- C{"twoMinus"} (count <=2),
- C{"twoPlus"} (count >= 2),
- C{"notTwo"} (count != 2),
- C{"no"} (False).
@type filters: iterable of str
@param other: Whether to return a list of filtered events as well.
@type other: L{bool}
@return: event numbers or 2-tuple of lists of event numbers.
@rtype: L{list} of L{int} or 2-L{tuple} of L{list} of L{int}
"""
events = [
dict(count=0),
dict(count=1),
dict(count=2),
dict(count=3),
]
class Filters(object):
@staticmethod
def twoMinus(event):
"""
count <= 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] <= 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] <= 2:
return PredicateResult.yes
return PredicateResult.maybe
@staticmethod
def twoPlus(event):
"""
count >= 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] >= 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] >= 2:
return PredicateResult.yes
return PredicateResult.maybe
@staticmethod
def notTwo(event):
"""
count != 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] != 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] == 2:
return PredicateResult.no
return PredicateResult.maybe
@staticmethod
def no(event):
"""
No way, man.
@param event: an event
@type event: dict
@return: L{PredicateResult.no}
"""
return PredicateResult.no
@staticmethod
def bogus(event):
"""
Bogus result.
@param event: an event
@type event: dict
@return: something other than a valid predicate result.
"""
return None
predicates = (getattr(Filters, f) for f in filters)
eventsSeen = []
eventsNotSeen = []
trackingObserver = eventsSeen.append
if other:
extra = [eventsNotSeen.append]
else:
extra = []
filteringObserver = FilteringLogObserver(
trackingObserver, predicates, *extra
)
for e in events:
filteringObserver(e)
if extra:
return (
[e["count"] for e in eventsSeen],
[e["count"] for e in eventsNotSeen],
)
return [e["count"] for e in eventsSeen]
def test_shouldLogEventNoFilters(self):
"""
No filters: all events come through.
"""
self.assertEqual(self.filterWith([]), [0, 1, 2, 3])
def test_shouldLogEventNoFilter(self):
"""
Filter with negative predicate result.
"""
self.assertEqual(self.filterWith(["notTwo"]), [0, 1, 3])
def test_shouldLogEventOtherObserver(self):
"""
Filtered results get sent to the other observer, if passed.
"""
self.assertEqual(self.filterWith(["notTwo"], True), ([0, 1, 3], [2]))
def test_shouldLogEventYesFilter(self):
"""
Filter with positive predicate result.
"""
self.assertEqual(self.filterWith(["twoPlus"]), [0, 1, 2, 3])
def test_shouldLogEventYesNoFilter(self):
"""
Series of filters with positive and negative predicate results.
"""
self.assertEqual(self.filterWith(["twoPlus", "no"]), [2, 3])
def test_shouldLogEventYesYesNoFilter(self):
"""
Series of filters with positive, positive and negative predicate
results.
"""
self.assertEqual(
self.filterWith(["twoPlus", "twoMinus", "no"]),
[0, 1, 2, 3]
)
def test_shouldLogEventBadPredicateResult(self):
"""
Filter with invalid predicate result.
"""
self.assertRaises(TypeError, self.filterWith, ["bogus"])
def test_call(self):
"""
Test filtering results from each predicate type.
"""
e = dict(obj=object())
def callWithPredicateResult(result):
seen = []
observer = FilteringLogObserver(
lambda e: seen.append(e),
(lambda e: result,)
)
observer(e)
return seen
self.assertIn(e, callWithPredicateResult(PredicateResult.yes))
self.assertIn(e, callWithPredicateResult(PredicateResult.maybe))
self.assertNotIn(e, callWithPredicateResult(PredicateResult.no))
def test_trace(self):
"""
Tracing keeps track of forwarding through the filtering observer.
"""
event = dict(log_trace=[])
oYes = lambda e: None
oNo = lambda e: None
def testObserver(e):
self.assertIs(e, event)
self.assertEqual(
event["log_trace"],
[
(publisher, yesFilter),
(yesFilter, oYes),
(publisher, noFilter),
# ... noFilter doesn't call oNo
(publisher, oTest),
]
)
oTest = testObserver
yesFilter = FilteringLogObserver(
oYes,
(lambda e: PredicateResult.yes,)
)
noFilter = FilteringLogObserver(
oNo,
(lambda e: PredicateResult.no,)
)
publisher = LogPublisher(yesFilter, noFilter, testObserver)
publisher(event)
class LogLevelFilterPredicateTests(unittest.TestCase):
"""
Tests for L{LogLevelFilterPredicate}.
"""
def test_defaultLogLevel(self):
"""
Default log level is used.
"""
predicate = LogLevelFilterPredicate()
self.assertEqual(
predicate.logLevelForNamespace(None),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace(""),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("rocker.cool.namespace"),
predicate.defaultLogLevel
)
def test_setLogLevel(self):
"""
Setting and retrieving log levels.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace(None, LogLevel.error)
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.warn)
self.assertEqual(
predicate.logLevelForNamespace(None),
LogLevel.error
)
self.assertEqual(
predicate.logLevelForNamespace("twisted"),
LogLevel.error
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2"),
LogLevel.debug
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav"),
LogLevel.warn
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test"),
LogLevel.warn
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test1.test2"),
LogLevel.warn
)
def test_setInvalidLogLevel(self):
"""
Can't pass invalid log levels to C{setLogLevelForNamespace()}.
"""
predicate = LogLevelFilterPredicate()
self.assertRaises(
InvalidLogLevelError,
predicate.setLogLevelForNamespace, "twext.web2", object()
)
# Level must be a constant, not the name of a constant
self.assertRaises(
InvalidLogLevelError,
predicate.setLogLevelForNamespace, "twext.web2", "debug"
)
def test_clearLogLevels(self):
"""
Clearing log levels.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.error)
predicate.clearLogLevels()
self.assertEqual(
predicate.logLevelForNamespace("twisted"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test1.test2"),
predicate.defaultLogLevel
)
def test_filtering(self):
"""
Events are filtered based on log level/namespace.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace(None, LogLevel.error)
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.warn)
def checkPredicate(namespace, level, expectedResult):
event = dict(log_namespace=namespace, log_level=level)
self.assertEqual(expectedResult, predicate(event))
checkPredicate("", LogLevel.debug, PredicateResult.no)
checkPredicate("", LogLevel.error, PredicateResult.maybe)
checkPredicate("twext.web2", LogLevel.debug, PredicateResult.maybe)
checkPredicate("twext.web2", LogLevel.error, PredicateResult.maybe)
checkPredicate("twext.web2.dav", LogLevel.debug, PredicateResult.no)
checkPredicate("twext.web2.dav", LogLevel.error, PredicateResult.maybe)
checkPredicate(None, LogLevel.critical, PredicateResult.no)
checkPredicate("twext.web2", None, PredicateResult.no)
|
gpl-2.0
|
n0m4dz/odoo
|
addons/mail/tests/test_invite.py
|
199
|
2591
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .common import TestMail
class test_invite(TestMail):
def test_00_basic_invite(self):
cr, uid = self.cr, self.uid
mail_invite = self.registry('mail.wizard.invite')
# Do: create a mail_wizard_invite, validate it
self._init_mock_build_email()
context = {'default_res_model': 'mail.group', 'default_res_id': self.group_pigs_id}
mail_invite_id = mail_invite.create(cr, self.user_raoul_id, {'partner_ids': [(4, self.partner_bert_id)], 'send_mail': True}, context)
mail_invite.add_followers(cr, self.user_raoul_id, [mail_invite_id], {'default_model': 'mail.group', 'default_res_id': 0})
# Test: Pigs followers should contain Admin, Bert
self.group_pigs.refresh()
follower_ids = [follower.id for follower in self.group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([self.partner_admin_id, self.partner_bert_id]), 'invite: Pigs followers after invite is incorrect')
# Test: (pretend to) send email and check subject, body
self.assertEqual(len(self._build_email_kwargs_list), 1, 'sent email number incorrect, should be only for Bert')
for sent_email in self._build_email_kwargs_list:
self.assertEqual(sent_email.get('subject'), 'Invitation to follow Discussion group: Pigs',
'invite: subject of invitation email is incorrect')
self.assertIn('Raoul Grosbedon invited you to follow Discussion group document: Pigs', sent_email.get('body'),
'invite: body of invitation email is incorrect')
|
agpl-3.0
|
nin042/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
|
113
|
20678
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import os
import signal
import sys
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing
_log = logging.getLogger(__name__)
# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254
def main(argv, stdout, stderr):
options, args = parse_args(argv)
if options.platform and 'test' in options.platform:
# It's a bit lame to import mocks into real code, but this allows the user
# to run tests against the test platform interactively, which is useful for
# debugging test failures.
from webkitpy.common.host_mock import MockHost
host = MockHost()
else:
host = Host()
if options.lint_test_files:
from webkitpy.layout_tests.lint_test_expectations import lint
return lint(host, options, stderr)
try:
port = host.port_factory.get(options.platform, options)
except NotImplementedError, e:
# FIXME: is this the best way to handle unsupported port names?
print >> stderr, str(e)
return EXCEPTIONAL_EXIT_STATUS
try:
run_details = run(port, options, args, stderr)
if run_details.exit_code != -1:
bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
bot_printer.print_results(run_details)
return run_details.exit_code
except KeyboardInterrupt:
return INTERRUPTED_EXIT_STATUS
except BaseException as e:
if isinstance(e, Exception):
print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
traceback.print_exc(file=stderr)
return EXCEPTIONAL_EXIT_STATUS
def parse_args(args):
option_group_definitions = []
option_group_definitions.append(("Platform options", platform_options()))
option_group_definitions.append(("Configuration options", configuration_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
option_group_definitions.append(("EFL-specific Options", [
optparse.make_option("--webprocess-cmd-prefix", type="string",
default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
]))
option_group_definitions.append(("WebKit Options", [
optparse.make_option("--gc-between-tests", action="store_true", default=False,
help="Force garbage collection between each test"),
optparse.make_option("--complex-text", action="store_true", default=False,
help="Use the complex text code path for all text (Mac OS X and Windows only)"),
optparse.make_option("-l", "--leaks", action="store_true", default=False,
help="Enable leaks checking (Mac OS X only)"),
optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
help="Enable Guard Malloc (Mac OS X only)"),
optparse.make_option("--threaded", action="store_true", default=False,
help="Run a concurrent JavaScript thread with each test"),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
# FIXME: We should merge this w/ --build-directory and only have one flag.
optparse.make_option("--root", action="store",
help="Path to a directory containing the executables needed to run tests."),
]))
option_group_definitions.append(("Results Options", [
optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-sample-on-timeout", action="store_false",
dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
optparse.make_option("--no-ref-tests", action="store_true",
dest="no_ref_tests", help="Skip all ref tests"),
optparse.make_option("--tolerance",
help="Ignore image differences less than this percentage (some "
"ports may ignore this option)", type="float"),
optparse.make_option("--results-directory", help="Location of test results"),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
optparse.make_option("--new-baseline", action="store_true",
default=False, help="Save generated results as new baselines "
"into the *most-specific-platform* directory, overwriting whatever's "
"already there. Equivalent to --reset-results --add-platform-exceptions"),
optparse.make_option("--reset-results", action="store_true",
default=False, help="Reset expectations to the "
"generated results in their existing location."),
optparse.make_option("--no-new-test-results", action="store_false",
dest="new_test_results", default=True,
help="Don't create new baselines when no expected results exist"),
#FIXME: we should support a comma separated list with --pixel-test-directory as well.
optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
help="A directory where it is allowed to execute tests as pixel tests. "
"Specify multiple times to add multiple directories. "
"This option implies --pixel-tests. If specified, only those tests "
"will be executed as pixel tests that are located in one of the "
"directories enumerated with the option. Some ports may ignore this "
"option while others can have a default value that can be overridden here."),
optparse.make_option("--skip-failing-tests", action="store_true",
default=False, help="Skip tests that are expected to fail. "
"Note: When using this option, you might miss new crashes "
"in these tests."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--additional-platform-directory", action="append",
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
optparse.make_option("--additional-expectations", action="append", default=[],
help="Path to a test_expectations file that will override previous expectations. "
"Specify multiple times for multiple sets of overrides."),
optparse.make_option("--compare-port", action="store", default=None,
help="Use the specified port's baselines first"),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"are done"),
optparse.make_option("--full-results-html", action="store_true",
default=False,
help="Show all failures in results.html, rather than only regressions"),
optparse.make_option("--clobber-old-results", action="store_true",
default=False, help="Clobbers test results from previous runs."),
optparse.make_option("--http", action="store_true", dest="http",
default=True, help="Run HTTP and WebSocket tests (default)"),
optparse.make_option("--no-http", action="store_false", dest="http",
help="Don't run HTTP and WebSocket tests"),
optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
default=False, help="Ignore rendering metrics related information from test "
"output, only compare the structure of the rendertree."),
optparse.make_option("--nocheck-sys-deps", action="store_true",
default=False,
help="Don't check the system dependencies (themes)"),
optparse.make_option("--nojava", action="store_true",
default=False,
help="Don't build java support files"),
]))
option_group_definitions.append(("Testing Options", [
optparse.make_option("--build", dest="build",
action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date "
"(default)."),
optparse.make_option("--no-build", dest="build",
action="store_false", help="Don't check to see if the "
"DumpRenderTree build is up-to-date."),
optparse.make_option("-n", "--dry-run", action="store_true",
default=False,
help="Do everything but actually run the tests or upload results."),
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"DumpRenderTree; option is split on whitespace before "
"running. (Example: --wrapper='valgrind --smc-check=all')"),
optparse.make_option("-i", "--ignore-tests", action="append", default=[],
help="directories or test to ignore (may specify multiple times)"),
optparse.make_option("--test-list", action="append",
help="read list of tests to run from file", metavar="FILE"),
optparse.make_option("--skipped", action="store", default="default",
help=("control how tests marked SKIP are run. "
"'default' == Skip tests unless explicitly listed on the command line, "
"'ignore' == Run them anyway, "
"'only' == only run the SKIP tests, "
"'always' == always skip, even if listed on the command line.")),
optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
optparse.make_option("--time-out-ms",
help="Set the timeout for each test"),
optparse.make_option("--order", action="store", default="natural",
help=("determine the order in which the test cases will be run. "
"'none' == use the order in which the tests were listed either in arguments or test list, "
"'natural' == use the natural order (default), "
"'random' == randomize the test order.")),
optparse.make_option("--run-chunk",
help=("Run a specified chunk (n:l), the nth of len l, "
"of the layout tests")),
optparse.make_option("--run-part", help=("Run a specified part (n:m), "
"the nth of m parts, of the layout tests")),
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
"DumpRenderTree is relaunched."), type="int", default=None),
optparse.make_option("--run-singly", action="store_true",
default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
optparse.make_option("--child-processes",
help="Number of DumpRenderTrees to run in parallel."),
# FIXME: Display default number of child processes that will run.
optparse.make_option("-f", "--fully-parallel", action="store_true",
help="run all tests in parallel"),
optparse.make_option("--exit-after-n-failures", type="int", default=None,
help="Exit after the first N failures instead of running all "
"tests"),
optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
default=None, help="Exit after the first N crashes instead of "
"running all tests"),
optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
optparse.make_option("--retry-failures", action="store_true",
default=True,
help="Re-try any tests that produce unexpected results (default)"),
optparse.make_option("--no-retry-failures", action="store_false",
dest="retry_failures",
help="Don't re-try any tests that produce unexpected results."),
optparse.make_option("--max-locked-shards", type="int", default=0,
help="Set the maximum number of locked shards"),
optparse.make_option("--additional-env-var", type="string", action="append", default=[],
help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
]))
option_group_definitions.append(("Miscellaneous Options", [
optparse.make_option("--lint-test-files", action="store_true",
default=False, help=("Makes sure the test files parse for all "
"configurations. Does not run any tests.")),
]))
# FIXME: Move these into json_results_generator.py
option_group_definitions.append(("Result JSON Options", [
optparse.make_option("--master-name", help="The name of the buildbot master."),
optparse.make_option("--builder-name", default="",
help=("The name of the builder shown on the waterfall running "
"this script e.g. WebKit.")),
optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
help=("The name of the builder used in its path, e.g. "
"webkit-rel.")),
optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
help=("The build number of the builder running this script.")),
optparse.make_option("--test-results-server", default="",
help=("If specified, upload results json files to this appengine "
"server.")),
]))
option_parser = optparse.OptionParser()
for group_name, group_options in option_group_definitions:
option_group = optparse.OptionGroup(option_parser, group_name)
option_group.add_options(group_options)
option_parser.add_option_group(option_group)
return option_parser.parse_args(args)
def _set_up_derived_options(port, options):
"""Sets the options values that depend on other options values."""
if not options.child_processes:
options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
str(port.default_child_processes()))
if not options.max_locked_shards:
options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
str(port.default_max_locked_shards())))
if not options.configuration:
options.configuration = port.default_configuration()
if options.pixel_tests is None:
options.pixel_tests = port.default_pixel_tests()
if not options.time_out_ms:
options.time_out_ms = str(port.default_timeout_ms())
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
if options.additional_platform_directory:
additional_platform_directories = []
for path in options.additional_platform_directory:
additional_platform_directories.append(port.host.filesystem.abspath(path))
options.additional_platform_directory = additional_platform_directories
if not options.http and options.skipped in ('ignore', 'only'):
_log.warning("--force/--skipped=%s overrides --no-http." % (options.skipped))
options.http = True
if options.ignore_metrics and (options.new_baseline or options.reset_results):
_log.warning("--ignore-metrics has no effect with --new-baselines or with --reset-results")
if options.new_baseline:
options.reset_results = True
options.add_platform_exceptions = True
if options.pixel_test_directories:
options.pixel_tests = True
        verified_dirs = set()
pixel_test_directories = options.pixel_test_directories
for directory in pixel_test_directories:
# FIXME: we should support specifying the directories all the ways we support it for additional
# arguments specifying which tests and directories to run. We should also move the logic for that
# to Port.
filesystem = port.host.filesystem
if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
_log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
else:
                verified_dirs.add(directory)
        options.pixel_test_directories = list(verified_dirs)
if options.run_singly:
options.verbose = True
def run(port, options, args, logging_stream):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
    printer = printing.Printer(port, options, logging_stream, logger=logger)
    try:
        _set_up_derived_options(port, options)
manager = Manager(port, options, printer)
printer.print_config(port.results_directory())
run_details = manager.run(args)
_log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
return run_details
finally:
printer.cleanup()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
|
bsd-3-clause
|
vincent-tr/rpi-js-os
|
ext/v8-6.3.166/v8/tools/testrunner/server/daemon.py
|
123
|
3753
|
#!/usr/bin/env python
# This code has been written by Sander Marechal and published at:
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
# where the author has placed it in the public domain (see comment #6 at
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/#c6
# ).
# Some minor modifications have been made by the V8 authors. The work remains
# in the public domain.
import atexit
import os
from signal import SIGTERM
from signal import SIGINT
import sys
import time
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
# TODO: (debug) re-enable this!
#os.dup2(si.fileno(), sys.stdin.fileno())
#os.dup2(so.fileno(), sys.stdout.fileno())
#os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
# Give the process a one-second chance to exit gracefully.
os.kill(pid, SIGINT)
time.sleep(1)
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be
called after the process has been daemonized by start() or restart().
"""
|
gpl-3.0
|
maxenglander/terraform
|
vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
|
1232
|
3478
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
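# Round-trip sketch (hypothetical consumer; mirrors how a golden file written
# above could be read back):
#
#   import msgpack
#   data = open(os.path.join(destdir, '0.golden'), 'rb').read()
#   print msgpack.unpackb(data)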
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
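# Illustrative invocations (arguments are positional, as parsed by doMain):
#
#   python msgpack_test.py testdata /tmp/golden
#   python msgpack_test.py rpc-server 5555 30
#   python msgpack_test.py rpc-client-python-service 5555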
|
mpl-2.0
|
willthames/ansible
|
lib/ansible/modules/network/cloudengine/ce_facts.py
|
39
|
11082
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: ce_facts
version_added: "2.4"
author: "wangdezhuang (@CloudEngine-Ansible)"
short_description: Gets facts about HUAWEI CloudEngine switches.
description:
- Collects facts from CloudEngine devices running the CloudEngine
operating system. Fact collection is supported over Cli
transport. This module prepends all of the base network fact keys
with C(ansible_net_<fact>). The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a
list of values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine facts test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Gather_subset is all"
ce_facts:
gather_subset: all
provider: "{{ cli }}"
- name: "Collect only the config facts"
ce_facts:
gather_subset: config
provider: "{{ cli }}"
- name: "Do not collect hardware facts"
ce_facts:
gather_subset: "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
BIOS Version:
description: The BIOS version running on the remote device
returned: always
type: str
Board Type:
description: The board type of the remote device
returned: always
type: str
CPLD1 Version:
description: The CPLD1 Version running the remote device
returned: always
type: str
CPLD2 Version:
description: The CPLD2 Version running the remote device
returned: always
type: str
MAB Version:
description: The MAB Version running the remote device
returned: always
type: str
PCB Version:
description: The PCB Version running the remote device
returned: always
type: str
hostname:
description: The hostname of the remote device
returned: always
type: str
# hardware
FAN:
description: The fan state on the device
returned: when hardware is configured
type: str
PWR:
description: The power state on the device
returned: when hardware is configured
type: str
filesystems:
description: The filesystems on the device
returned: when hardware is configured
type: str
flash_free:
description: The flash free space on the device
returned: when hardware is configured
type: str
flash_total:
description: The flash total space on the device
returned: when hardware is configured
type: str
memory_free:
description: The memory free space on the remote device
returned: when hardware is configured
type: str
memory_total:
description: The memory total space on the remote device
returned: when hardware is configured
type: str
# config
config:
description: The current system configuration on the device
returned: when config is configured
type: str
# interfaces
all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.ce import run_commands
from ansible.module_utils.ce import ce_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = frozenset()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
""" Class default """
COMMANDS = [
'display version',
'display current-configuration | include sysname'
]
def populate(self):
""" Populate method """
super(Default, self).populate()
data = self.responses[0]
        if data:
            # the first 11 lines of 'display version' are banner text;
            # the remaining rows are split into columns (fact name taken
            # from columns 1-2, value from column 4)
            version = data.split("\n")
            tmp_version = version[11:]
            for item in tmp_version:
                tmp_item = item.split()
                tmp_key = tmp_item[1] + " " + tmp_item[2]
                self.facts[tmp_key] = tmp_item[4]
data = self.responses[1]
if data:
tmp_value = re.findall(r'sysname (.*)', data)
self.facts['hostname'] = tmp_value[0]
class Config(FactsBase):
""" Class config """
COMMANDS = [
'display current-configuration configuration system'
]
def populate(self):
""" Populate method """
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data.split("\n")
class Hardware(FactsBase):
""" Class hardware """
COMMANDS = [
'dir',
'display memory',
'display device'
]
def populate(self):
""" Populate method """
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.facts['filesystems'] = re.findall(r'^Directory of (.*)/', data)[0]
self.facts['flash_total'] = re.findall(r'(.*) total', data)[0].replace(",", "")
self.facts['flash_free'] = re.findall(r'total \((.*) free\)', data)[0].replace(",", "")
data = self.responses[1]
        if data:
            memory_total = re.findall(r'Total Memory Used: (.*) Kbytes', data)[0]
            use_percent = re.findall(r'Memory Using Percentage: (.*)%', data)[0]
            # free = total - total * used% / 100; integer division keeps the
            # result a whole number of Kbytes on both Python 2 and Python 3
            memory_free = str(int(memory_total) - int(memory_total) * int(use_percent) // 100)
            self.facts['memory_total'] = memory_total + " Kb"
            self.facts['memory_free'] = memory_free + " Kb"
data = self.responses[2]
        if data:
            # 'display device' rows differ in column count; pick the
            # name and status columns accordingly
            device_info = data.split("\n")
tmp_device_info = device_info[4:-1]
for item in tmp_device_info:
tmp_item = item.split()
if len(tmp_item) == 8:
self.facts[tmp_item[2]] = tmp_item[6]
elif len(tmp_item) == 7:
self.facts[tmp_item[0]] = tmp_item[5]
class Interfaces(FactsBase):
""" Class interfaces """
COMMANDS = [
'display interface brief',
'display ip interface brief',
'display lldp neighbor brief'
]
def populate(self):
""" Populate method"""
interface_dict = dict()
ipv4_addr_dict = dict()
neighbors_dict = dict()
super(Interfaces, self).populate()
data = self.responses[0]
        if data:
            # skip the header lines of 'display interface brief'; each
            # remaining row is "<interface> <physical-state> ..." columns
            interface_info = data.split("\n")
tmp_interface = interface_info[12:]
for item in tmp_interface:
tmp_item = item.split()
interface_dict[tmp_item[0]] = tmp_item[1]
self.facts['interfaces'] = interface_dict
data = self.responses[1]
if data:
ipv4_addr = data.split("\n")
tmp_ipv4 = ipv4_addr[11:]
for item in tmp_ipv4:
tmp_item = item.split()
ipv4_addr_dict[tmp_item[0]] = tmp_item[1]
self.facts['all_ipv4_addresses'] = ipv4_addr_dict
data = self.responses[2]
if data:
neighbors = data.split("\n")
tmp_neighbors = neighbors[2:]
for item in tmp_neighbors:
tmp_item = item.split()
neighbors_dict[tmp_item[0]] = tmp_item[3]
self.facts['neighbors'] = neighbors_dict
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
""" Module main """
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
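    # A minimal sketch of how the resolution above behaves (illustrative
    # values only, assuming the four subsets defined in FACT_SUBSETS):
    #   gather_subset=['all']        -> default, hardware, interfaces, config
    #   gather_subset=['!config']    -> default, hardware, interfaces
    #   gather_subset=['interfaces'] -> interfaces, default
    # 'default' is always re-added after the exclusions are applied.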
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
        # this is to maintain compatibility with nxos_facts 2.1
if key.startswith('_'):
ansible_facts[key[1:]] = value
else:
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
gpl-3.0
|
romain-dartigues/ansible
|
lib/ansible/modules/cloud/memset/memset_memstore_facts.py
|
12
|
4680
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Simon Weald <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: memset_memstore_facts
author: "Simon Weald (@analbeard)"
version_added: "2.8"
short_description: Retrieve Memstore product usage information.
notes:
- An API key generated via the Memset customer control panel is needed with the
following minimum scope - I(memstore.usage).
description:
- Retrieve Memstore product usage information.
options:
api_key:
required: true
description:
- The API key obtained from the Memset control panel.
name:
required: true
description:
      - The Memstore product name (e.g. C(mstestyaa1)).
'''
EXAMPLES = '''
- name: get usage for mstestyaa1
memset_memstore_facts:
name: mstestyaa1
api_key: 5eb86c9896ab03919abcf03857163741
delegate_to: localhost
'''
RETURN = '''
---
memset_api:
description: Info from the Memset API
returned: always
type: complex
contains:
cdn_bandwidth:
description: Dictionary of CDN bandwidth facts
returned: always
type: complex
contains:
bytes_out:
description: Outbound CDN bandwidth for the last 24 hours in bytes
returned: always
type: integer
sample: 1000
requests:
description: Number of requests in the last 24 hours
returned: always
type: integer
sample: 10
bytes_in:
description: Inbound CDN bandwidth for the last 24 hours in bytes
returned: always
type: integer
sample: 1000
containers:
description: Number of containers
returned: always
type: integer
sample: 10
bytes:
description: Space used in bytes
returned: always
type: integer
sample: 3860997965
objs:
description: Number of objects
returned: always
type: integer
sample: 1000
bandwidth:
description: Dictionary of CDN bandwidth facts
returned: always
type: complex
contains:
bytes_out:
description: Outbound bandwidth for the last 24 hours in bytes
returned: always
type: integer
sample: 1000
requests:
description: Number of requests in the last 24 hours
returned: always
type: integer
sample: 10
bytes_in:
description: Inbound bandwidth for the last 24 hours in bytes
returned: always
type: integer
sample: 1000
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.memset import memset_api_call
def get_facts(args=None):
'''
Performs a simple API call and returns a JSON blob.
'''
retvals, payload = dict(), dict()
has_changed, has_failed = False, False
msg, stderr, memset_api = None, None, None
payload['name'] = args['name']
api_method = 'memstore.usage'
has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
if has_failed:
# this is the first time the API is called; incorrect credentials will
# manifest themselves at this point so we need to ensure the user is
# informed of the reason.
        retvals['failed'] = has_failed
        retvals['msg'] = msg
        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
        return retvals
    # we don't want to return the same thing twice
    msg = None
    memset_api = response.json()
    retvals['changed'] = has_changed
    retvals['failed'] = has_failed
    # only include the optional return keys when they actually carry a value
    for key, value in (('msg', msg), ('memset_api', memset_api)):
        if value is not None:
            retvals[key] = value
    return retvals
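# On success, the dict returned above looks roughly like this (illustrative
# values only; the real payload comes from the memstore.usage API call):
#   {'changed': False, 'failed': False,
#    'memset_api': {'bytes': 3860997965, 'containers': 10, ...}}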
def main():
global module
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, type='str', no_log=True),
name=dict(required=True, type='str')
),
supports_check_mode=False
)
# populate the dict with the user-provided vars.
args = dict()
for key, arg in module.params.items():
args[key] = arg
retvals = get_facts(args)
if retvals['failed']:
module.fail_json(**retvals)
else:
module.exit_json(**retvals)
if __name__ == '__main__':
main()
|
gpl-3.0
|
buntyke/GPy
|
GPy/util/classification.py
|
15
|
1489
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
def conf_matrix(p,labels,names=['1','0'],threshold=.5,show=True):
"""
Returns error rate and true/false positives in a binary classification problem
- Actual classes are displayed by column.
- Predicted classes are displayed by row.
:param p: array of class '1' probabilities.
:param labels: array of actual classes.
:param names: list of class names, defaults to ['1','0'].
:param threshold: probability value used to decide the class.
:param show: whether the matrix should be shown or not
:type show: False|True
"""
assert p.size == labels.size, "Arrays p and labels have different dimensions."
decision = np.ones((labels.size,1))
decision[p<threshold] = 0
diff = decision - labels
false_0 = diff[diff == -1].size
false_1 = diff[diff == 1].size
true_1 = np.sum(decision[diff ==0])
true_0 = labels.size - true_1 - false_0 - false_1
    error = (false_1 + false_0)/float(labels.size)
if show:
print(100. - error * 100,'% instances correctly classified')
print('%-10s| %-10s| %-10s| ' % ('',names[0],names[1]))
print('----------|------------|------------|')
print('%-10s| %-10s| %-10s| ' % (names[0],true_1,false_0))
print('%-10s| %-10s| %-10s| ' % (names[1],false_1,true_0))
    return error, true_1, false_1, true_0, false_0
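# A minimal usage sketch (toy column vectors, assumed for illustration):
#   import numpy as np
#   p = np.array([[0.9], [0.2], [0.7], [0.4]])
#   labels = np.array([[1], [0], [1], [1]])
#   conf_matrix(p, labels, show=False)
#   -> (0.25, 2.0, 0, 1.0, 1)   # error, true_1, false_1, true_0, false_0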
|
mit
|
ChameleonCloud/horizon
|
openstack_dashboard/test/unit/api/test_network.py
|
3
|
6498
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
import netaddr
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkApiNeutronTests(test.APIMockTestCase):
def setUp(self):
super(NetworkApiNeutronTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
def _get_expected_addresses(self, server, no_fip_expected=True):
server_ports = self.ports.filter(device_id=server.id)
addresses = collections.defaultdict(list)
for p in server_ports:
net_name = self.networks.get(id=p['network_id']).name
for ip in p.fixed_ips:
version = netaddr.IPAddress(ip['ip_address']).version
addresses[net_name].append(
{'version': version,
'addr': ip['ip_address'],
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'fixed'})
if no_fip_expected:
continue
fips = self.floating_ips.filter(port_id=p['id'])
if not fips:
continue
# Only one FIP should match.
fip = fips[0]
addresses[net_name].append(
{'version': 4,
'addr': fip.floating_ip_address,
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'floating'})
return addresses
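    # The structure built by _get_expected_addresses above mirrors nova's
    # server.addresses, e.g. (network name, address and MAC illustrative only):
    #   {'net1': [{'version': 4, 'addr': '10.0.0.2',
    #              'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:aa:bb:cc',
    #              'OS-EXT-IPS:type': 'fixed'}]}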
def _check_server_address(self, res_server_data, no_fip_expected=False):
expected_addresses = self._get_expected_addresses(res_server_data,
no_fip_expected)
self.assertEqual(len(expected_addresses),
len(res_server_data.addresses))
for net, addresses in expected_addresses.items():
self.assertIn(net, res_server_data.addresses)
self.assertEqual(addresses, res_server_data.addresses[net])
def _test_servers_update_addresses(self, router_enabled=True):
tenant_id = self.request.user.tenant_id
servers = self.servers.list()
server_ids = tuple([server.id for server in servers])
server_ports = [p for p in self.api_ports.list()
if p['device_id'] in server_ids]
server_port_ids = tuple([p['id'] for p in server_ports])
if router_enabled:
assoc_fips = [fip for fip in self.api_floating_ips.list()
if fip['port_id'] in server_port_ids]
server_network_ids = [p['network_id'] for p in server_ports]
server_networks = [net for net in self.api_networks.list()
if net['id'] in server_network_ids]
list_ports_retvals = [{'ports': server_ports}]
self.qclient.list_ports.side_effect = list_ports_retvals
if router_enabled:
self.qclient.list_floatingips.return_value = {'floatingips':
assoc_fips}
list_ports_retvals.append({'ports': self.api_ports.list()})
self.qclient.list_networks.return_value = {'networks': server_networks}
self.qclient.list_subnets.return_value = {'subnets':
self.api_subnets.list()}
api.network.servers_update_addresses(self.request, servers)
self.assertEqual(self.servers.count(), len(servers))
self.assertEqual([server.id for server in self.servers.list()],
[server.id for server in servers])
no_fip_expected = not router_enabled
        # server[0] has one fixed IP and one floating IP
        # if the router extension is enabled.
        self._check_server_address(servers[0], no_fip_expected)
        # The expected addresses are computed above; we also examine
        # the result manually once.
addrs = servers[0].addresses['net1']
if router_enabled:
self.assertEqual(3, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])
self.assertEqual('floating', addrs[2]['OS-EXT-IPS:type'])
else:
self.assertEqual(2, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])
# server[1] has one fixed IP.
self._check_server_address(servers[1], no_fip_expected)
# manual check.
addrs = servers[1].addresses['net2']
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[2] has no corresponding ports in neutron_data,
# so it should be an empty dict.
self.assertFalse(servers[2].addresses)
expected_list_ports = [mock.call(device_id=server_ids)]
if router_enabled:
self.qclient.list_floatingips.assert_called_once_with(
tenant_id=tenant_id, port_id=server_port_ids)
expected_list_ports.append(mock.call(tenant_id=tenant_id))
else:
self.assertEqual(0, self.qclient.list_floatingips.call_count)
self.qclient.list_ports.assert_has_calls(expected_list_ports)
self.qclient.list_networks.assert_called_once_with(
id=frozenset(server_network_ids))
self.qclient.list_subnets.assert_called_once_with()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_servers_update_addresses(self):
self._test_servers_update_addresses()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_servers_update_addresses_router_disabled(self):
self._test_servers_update_addresses(router_enabled=False)
|
apache-2.0
|
kevin-hannegan/vps-droplet
|
website/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
2040
|
8935
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
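    # For example, after od['a'] = 1 and od['b'] = 2 the layout is:
    #   root <-> link('a') <-> link('b') <-> root          (circular)
    # with self.__map == {'a': link('a'), 'b': link('b')} and the values
    # stored in the inherited dict as usual.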
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
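    # For example: OrderedDict([('a', 1), ('b', 2)]) is *not* equal to
    # OrderedDict([('b', 2), ('a', 1)]), yet both compare equal to the
    # plain dict {'a': 1, 'b': 2}.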
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
mit
|
IRI-Research/django
|
django/utils/deconstruct.py
|
38
|
1087
|
def deconstructible(*args, **kwargs):
"""
    Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
return (
path or '%s.%s' % (obj.__class__.__module__, obj.__class__.__name__),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
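# A minimal usage sketch (hypothetical validator class, for illustration):
#   @deconstructible(path='myapp.validators.EvenValidator')
#   class EvenValidator(object):
#       def __init__(self, modulus=2):
#           self.modulus = modulus
#
#   EvenValidator(modulus=4).deconstruct()
#   -> ('myapp.validators.EvenValidator', (), {'modulus': 4})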
|
bsd-3-clause
|
Edu-Glez/Bank_sentiment_analysis
|
env/lib/python3.6/site-packages/pygments/lexers/_cocoa_builtins.py
|
25
|
39982
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._cocoa_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This file defines a set of types used across the Cocoa frameworks from Apple.
    It contains lists of @interfaces, @protocols and some other types
    (structs, unions). The file may also be used as a standalone generator
    for the lists above.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
COCOA_INTERFACES = set(['UITableViewCell', 'HKCorrelationQuery', 'NSURLSessionDataTask', 'PHFetchOptions', 'NSLinguisticTagger', 'NSStream', 'AVAudioUnitDelay', 'GCMotion', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'AVAudioEnvironmentDistanceAttenuationParameters', 'HKStatisticsCollection', 'SCNPlane', 'CBPeer', 'JSContext', 'SCNTransaction', 'SCNTorus', 'AVAudioUnitEffect', 'UICollectionReusableView', 'MTLSamplerDescriptor', 'AVAssetReaderSampleReferenceOutput', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'SKRange', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UIVisualEffectView', 'CIWarpKernel', 'PKObject', 'MKRoute', 'MPVolumeView', 'UIPrintInfo', 'SCNText', 'ADClient', 'PKPayment', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'WKScriptMessage', 'AVMIDIPlayer', 'PHCollectionListChangeRequest', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'SKPaymentTransaction', 'NEOnDemandRuleConnect', 'NSShadow', 'SCNView', 'NSURLSessionConfiguration', 'MTLVertexAttributeDescriptor', 'CBCharacteristic', 'HKQuantityType', 'CKLocationSortDescriptor', 'NEVPNIKEv2SecurityAssociationParameters', 'CMStepCounter', 'NSNetService', 'AVAssetWriterInputMetadataAdaptor', 'UICollectionView', 'UIViewPrintFormatter', 'SCNLevelOfDetail', 'CAShapeLayer', 'MCPeerID', 'MPRatingCommand', 'WKNavigation', 'NSDictionary', 'NSFileVersion', 'CMGyroData', 'AVAudioUnitDistortion', 'CKFetchRecordsOperation', 'SKPhysicsJointSpring', 'SCNHitTestResult', 'AVAudioTime', 'CIFilter', 'UIView', 'SCNConstraint', 'CAPropertyAnimation', 'MKMapItem', 'MPRemoteCommandCenter', 'PKPaymentSummaryItem', 'UICollectionViewFlowLayoutInvalidationContext', 'UIInputViewController', 'PKPass', 'SCNPhysicsBehavior', 'MTLRenderPassColorAttachmentDescriptor', 'MKPolygonRenderer', 'CKNotification', 'JSValue', 'PHCollectionList', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'MPFeedbackCommand', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'NSLengthFormatter', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'SCNPyramid', 'PHAssetCollection', 'MKMapView', 'HMHomeManager', 'CATransition', 'MTLCompileOptions', 'UIVibrancyEffect', 'CLCircularRegion', 'MKTileOverlay', 'SCNShape', 'ACAccountCredential', 'SKPhysicsJointLimit', 'MKMapSnapshotter', 'AVMediaSelectionGroup', 'NSIndexSet', 'CBPeripheralManager', 'CKRecordZone', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'AVAudioOutputNode', 'MTLVertexAttributeDescriptorArray', 'MKETAResponse', 'SKTransition', 'SSReadingList', 'HKSourceQuery', 'UITableViewRowAction', 'UITableView', 'SCNParticlePropertyController', 'AVCaptureStillImageOutput', 'GCController', 'AVAudioPlayerNode', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NEOnDemandRuleEvaluateConnection', 'SKEffectNode', 'HKQuantity', 'GCControllerElement', 'AVPlayerItemAccessLogEvent', 'SCNBox', 'NSExtensionContext', 'MKOverlayRenderer', 'SCNPhysicsVehicle', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'CKQuery', 'AVAudioMixerNode', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'NSBatchUpdateRequest', 'PHContentEditingOutput', 'PHObjectChangeDetails', 'HKWorkoutType', 'MPMoviePlayerController', 'AVAudioFormat', 'HMTrigger', 'MTLRenderPassDepthAttachmentDescriptor', 'SCNRenderer', 'GKScore', 'UISplitViewController', 'HKSource', 'NSURLConnection', 'ABUnknownPersonViewController', 'SCNTechnique', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 
'NSKeyedArchiver', 'GKLeaderboardSet', 'NSSimpleCString', 'AVAudioPCMBuffer', 'CBATTRequest', 'GKMatchRequest', 'AVMetadataObject', 'SKProductsRequest', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SCNFloor', 'NSSortDescriptor', 'CKFetchNotificationChangesOperation', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVAudioUnitGenerator', 'WKBackForwardList', 'SKMutableTexture', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'MPRatingCommandEvent', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'MPRemoteCommand', 'AVCaptureSession', 'UIStepper', 'UIRefreshControl', 'NEEvaluateConnectionRule', 'CKModifyRecordsOperation', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'PKShippingMethod', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UILexiconEntry', 'CMMotionActivity', 'SKAction', 'SKShader', 'AVPlayerItemOutput', 'MTLRenderPassAttachmentDescriptor', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'CAInterAppAudioTransportView', 'SKMutablePayment', 'UIImage', 'PHCachingImageManager', 'SCNTransformConstraint', 'HKCorrelationType', 'UIColor', 'SCNGeometrySource', 'AVCaptureAutoExposureBracketedStillImageSettings', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'UICollectionViewLayoutAttributes', 'AVAssetReaderOutputMetadataAdaptor', 'NSEntityMigrationPolicy', 'HMUser', 'NSLocale', 'NSURLSession', 'SCNCamera', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'HMServiceGroup', 'UIPasteboard', 'PHContentEditingInput', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSEnergyFormatter', 'SLRequest', 'HMCharacteristic', 'AVPlayerLayer', 'MTLRenderPassDescriptor', 'SKPayment', 'NSPointerArray', 'AVAudioMix', 'SCNLight', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'HKCategorySample', 'AVAudioEnvironmentReverbParameters', 'SCNMorpher', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 'NSFetchRequestExpression', 'HMActionSet', 'CTSubscriber', 'PHAssetChangeRequest', 'NSPersistentStoreRequest', 'UITabBarController', 'HKQuantitySample', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UIPresentationController', 'UINavigationItem', 'PHFetchResultChangeDetails', 'PHImageManager', 'AVCaptureManualExposureBracketedStillImageSettings', 'UIStoryboardPopoverSegue', 'SCNLookAtConstraint', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'NEOnDemandRuleDisconnect', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 'GLKEffectPropertyTexture', 'NSHTTPCookie', 'NSURLResponse', 'SKPaymentQueue', 'NSAssertionHandler', 'MKReverseGeocoder', 'GCControllerAxisInput', 'NSArray', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVMutableVideoCompositionInstruction', 'AVAssetReaderOutput', 'EAGLContext', 'WKFrameInfo', 'CMPedometer', 'MyClass', 'CKModifyBadgeOperation', 'AVCaptureAudioFileOutput', 'SKEmitterNode', 'NSMachPort', 'AVVideoCompositionCoreAnimationTool', 'PHCollection', 'SCNPhysicsWorld', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 
'CLFloor', 'AVAsynchronousVideoCompositionRequest', 'SCNGeometry', 'SCNIKConstraint', 'CIKernel', 'CAGradientLayer', 'HKCharacteristicType', 'NSFormatter', 'SCNAction', 'CATransaction', 'CBUUID', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'AVAudioUnit', 'MCBrowserViewController', 'UIFontDescriptor', 'NSRelationshipDescription', 'HKSample', 'WKWebView', 'NSMutableAttributedString', 'NSPersistentStoreAsynchronousResult', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'HKCorrelation', 'CATextLayer', 'NSNotificationQueue', 'UINib', 'GLKTextureLoader', 'HKObjectType', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'AVPlayerViewController', 'CAScrollLayer', 'GKSavedGame', 'NSTextCheckingResult', 'PHObjectPlaceholder', 'SKConstraint', 'EKEventEditViewController', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'SCNNode', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'NEOnDemandRule', 'MTLRenderPassStencilAttachmentDescriptor', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'NEOnDemandRuleIgnore', 'SKRegion', 'AVAssetResourceLoader', 'EAWiFiUnconfiguredAccessoryBrowser', 'NSUserActivity', 'CTCall', 'UIPrinterPickerController', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'CKRecordZoneID', 'HKAnchoredObjectQuery', 'CKFetchRecordZonesOperation', 'UIStoryboardSegue', 'ACAccountType', 'GKSession', 'SKVideoNode', 'PHChange', 'SKReceiptRefreshRequest', 'GCExtendedGamepadSnapshot', 'MPSeekCommandEvent', 'GCExtendedGamepad', 'CAValueFunction', 'SCNCylinder', 'NSNotification', 'NSBatchUpdateResult', 'PKPushCredentials', 'SCNPhysicsSliderJoint', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'UILexicon', 'MTLArrayType', 'AVAudioUnitReverb', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIPrinter', 'NSOrderedSet', 'CBAttribute', 'PKPushPayload', 'NSIncrementalStoreNode', 'EKEventStore', 'MPRemoteCommandEvent', 'UISlider', 'UIBlurEffect', 'CKAsset', 'AVCaptureInput', 'AVAudioEngine', 'MTLVertexDescriptor', 'SKPhysicsBody', 'NSOperation', 'PKPaymentPass', 'UIImageAsset', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 'CTCallCenter', 'CABTMIDILocalPeripheralViewController', 'NEVPNManager', 'HKQuery', 'SCNPhysicsContact', 'CBMutableService', 'AVSampleBufferDisplayLayer', 'SCNSceneSource', 'SKLightNode', 'CKDiscoveredUserInfo', 'NSMutableArray', 'MTLDepthStencilDescriptor', 'MTLArgument', 'NSMassFormatter', 'CIRectangleFeature', 'PKPushRegistry', 'NEVPNConnection', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'HKWorkout', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'MPChangePlaybackRateCommandEvent', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'AVAudioUnitSampler', 'NSItemProvider', 'SKUniform', 'MPMediaPickerController', 'CKOperation', 'MTLRenderPipelineDescriptor', 'EAWiFiUnconfiguredAccessory', 'NSFileCoordinator', 'SKRequest', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'SCNCone', 'MPMediaItem', 'SCNMaterial', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NEVPNProtocol', 'NSCompoundPredicate', 'HKHealthStore', 
'MKMultiPoint', 'HKSampleType', 'UIPrintFormatter', 'AVAudioUnitEQFilterParameters', 'SKView', 'NSConstantString', 'UIPopoverController', 'CKDatabase', 'AVMetadataFaceObject', 'UIAccelerometer', 'EKEventViewController', 'CMAltitudeData', 'MTLStencilDescriptor', 'UISwipeGestureRecognizer', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'NSAsynchronousFetchRequest', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'CKNotificationID', 'AVAudioSession', 'HKUnit', 'NSNull', 'NSPersistentStoreResult', 'MKCircleView', 'AVAudioChannelLayout', 'NEVPNProtocolIKEv2', 'WKProcessPool', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 'NSURLCache', 'GKPlayer', 'NSMappingModel', 'CIQRCodeFeature', 'AVMutableVideoComposition', 'PHFetchResult', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'PKPaymentRequest', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'AVAudioUnitTimePitch', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'UIDocumentPickerViewController', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'AVAudioFile', 'UIActivity', 'NSPipe', 'MKShape', 'NSMergeConflict', 'CIImage', 'HKObject', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'CKMarkNotificationsReadOperation', 'CKSubscription', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'HMAccessoryBrowser', 'GKTurnBasedEventHandler', 'UIWebView', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'WKNavigationResponse', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'HKStatistics', 'UILocalNotification', 'HKBiologicalSexObject', 'AVURLAsset', 'CBPeripheral', 'NSDateComponentsFormatter', 'SKSpriteNode', 'UIAccessibilityElement', 'AVAssetWriterInputGroup', 'HMZone', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'PHPhotoLibrary', 'CBService', 'UIDocumentMenuViewController', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'ASIdentifierManager', 'AVAssetResourceLoadingRequest', 'SLComposeServiceViewController', 'UIPinchGestureRecognizer', 'PHObject', 'NSExtensionItem', 'HKSampleQuery', 'MTLRenderPipelineColorAttachmentDescriptorArray', 'MKRouteStep', 'SCNCapsule', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'UITraitCollection', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'MTLRenderPipelineReflection', 'CLHeading', 'CLVisit', 'MKDirectionsResponse', 'HMAccessory', 'MTLStructType', 'UITextView', 'CMMagnetometerData', 'UICollisionBehavior', 'UIProgressView', 'CKServerChangeToken', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'NSPropertyMapping', 'ALAssetsFilter', 'SK3DNode', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'SCNSphere', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSFileProviderExtension', 'NSDecimalNumberHandler', 'NSAtomicStoreCacheNode', 'NSAtomicStore', 'EKAlarm', 'CKNotificationInfo', 'AVAudioUnitEQ', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 
'MTLVertexAttribute', 'NSExpressionDescription', 'HKStatisticsCollectionQuery', 'NSURLAuthenticationChallenge', 'NSDirectoryEnumerator', 'MKDistanceFormatter', 'UIAlertAction', 'NSPropertyListSerialization', 'GKPeerPickerController', 'UIUserNotificationSettings', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'MTLRenderPassColorAttachmentDescriptorArray', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'UIViewController', 'PKPaymentToken', 'MFMessageComposeViewController', 'AVAudioInputNode', 'NSDataDetector', 'CABTMIDICentralViewController', 'AVAudioUnitMIDIInstrument', 'AVCaptureVideoPreviewLayer', 'AVAssetWriterInputPassDescription', 'MPChangePlaybackRateCommand', 'NSURLComponents', 'CAMetalLayer', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'CKDiscoverUserInfosOperation', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'SCNGeometryElement', 'NCWidgetController', 'CAEmitterLayer', 'MKUserLocation', 'UIImagePickerController', 'CIFeature', 'AVCaptureDeviceInput', 'ALAsset', 'NSURLSessionDownloadTask', 'SCNPhysicsHingeJoint', 'MPMoviePlayerViewController', 'NSMutableOrderedSet', 'SCNMaterialProperty', 'UIFont', 'AVCaptureVideoDataOutput', 'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'WKWebViewConfiguration', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'AVComposition', 'CKDiscoverAllContactsOperation', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'UIMutableUserNotificationCategory', 'NSLock', 'NEVPNProtocolIPSec', 'ADBannerView', 'UIDocumentPickerExtensionViewController', 'UIActivityIndicatorView', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIAccessibilityCustomAction', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'HKBloodTypeObject', 'MTLVertexBufferLayoutDescriptorArray', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'PKPaymentAuthorizationViewController', 'UIScrollView', 'WKNavigationAction', 'AVPlayerItemMetadataOutput', 'EKRecurrenceDayOfWeek', 'NSNumberFormatter', 'MTLComputePipelineReflection', 'UIScreen', 'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'SCNSkinner', 'AVCaptureMetadataOutput', 'SCNAnimationEvent', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'WKBackForwardListItem', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKCalendar', 'HKWorkoutEvent', 'NSMutableURLRequest', 'UIVideoEditorController', 'HMTimerTrigger', 'AVAudioUnitVarispeed', 'UIDynamicAnimator', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'UIPushBehavior', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'WKUserScript', 'EKEvent', 'NSDateFormatter', 'NSAsynchronousFetchResult', 'AVAssetWriterInputPixelBufferAdaptor', 'UIVisualEffect', 'UICollectionViewCell', 'UITextField', 'CLPlacemark', 'MPPlayableContentManager', 'AVCaptureOutput', 'HMCharacteristicWriteAction', 'CKModifySubscriptionsOperation', 'NSPropertyDescription', 'GCGamepad', 'UIMarkupTextPrintFormatter', 'SCNTube', 'NSPersistentStoreCoordinator', 'AVAudioEnvironmentNode', 'GKMatchmaker', 'CIContext', 'NSThread', 'SLComposeSheetConfigurationItem', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 
'GKGameCenterViewController', 'UIPrintPaper', 'SCNPhysicsBallSocketJoint', 'UICollectionViewLayoutInvalidationContext', 'GLKEffectPropertyTransform', 'AVAudioIONode', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CKRecordZoneNotification', 'SCNScene', 'MPMovieAccessLogEvent', 'CKFetchSubscriptionsOperation', 'CAEmitterCell', 'AVAudioUnitTimeEffect', 'HMCharacteristicMetadata', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'UIUserNotificationCategory', 'SCNPhysicsVehicleWheel', 'HKCategoryType', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'MTLRenderPipelineColorAttachmentDescriptor', 'SCNPhysicsShape', 'UISearchController', 'SCNPhysicsBody', 'CTSubscriberInfo', 'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'SKNode', 'UIMutableUserNotificationAction', 'SCNProgram', 'AVSpeechSynthesisVoice', 'CMAltimeter', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SCNPhysicsField', 'CKReference', 'LAContext', 'CKRecordID', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 'AVAudioBuffer', 'CIColorKernel', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'CKQueryNotification', 'PHAdjustmentData', 'EASession', 'AVAssetResourceRenewalRequest', 'UIInputView', 'NSFileWrapper', 'UIResponder', 'NSPointerFunctions', 'UIKeyCommand', 'NSHTTPCookieStorage', 'AVMediaSelectionOption', 'NSRunLoop', 'NSFileAccessIntent', 'CAAnimationGroup', 'MKCircle', 'UIAlertController', 'NSMigrationManager', 'NSDateIntervalFormatter', 'UICollectionViewUpdateItem', 'CKDatabaseOperation', 'PHImageRequestOptions', 'SKReachConstraints', 'CKRecord', 'CAInterAppAudioSwitcherView', 'WKWindowFeatures', 'GKInvite', 'NSMutableData', 'PHAssetCollectionChangeRequest', 'NSMutableParagraphStyle', 'UIDynamicBehavior', 'GLKEffectProperty', 'CKFetchRecordChangesOperation', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'MPContentItem', 'HMAction', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'CKContainer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'AVCaptureMovieFileOutput', 'HMRoom', 'GKChallenge', 'UITextRange', 'NSURLProtectionSpace', 'ACAccountStore', 'MPSkipIntervalCommand', 'NSComparisonPredicate', 'HMHome', 'PHVideoRequestOptions', 'NSOutputStream', 'MPSkipIntervalCommandEvent', 'PKAddPassesViewController', 'UITextSelectionRect', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'UICollectionViewController', 'AVAudioNode', 'MCNearbyServiceAdvertiser', 'NSObject', 'PHAsset', 'GKLeaderboardViewController', 'CKQueryCursor', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'CMPedometerData', 'HMService', 'SKFieldNode', 'GKAchievement', 'WKUserContentController', 'AVAssetTrack', 'TWRequest', 'SKLabelNode', 'AVCaptureBracketedStillImageSettings', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'MTLTextureDescriptor', 'MTLVertexBufferLayoutDescriptor', 'MPFeedbackCommandEvent', 'UIUserNotificationAction', 'HKStatisticsQuery', 'SCNParticleSystem', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'HKObserverQuery', 'UIPopoverPresentationController', 'CKQueryOperation', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'NSURLQueryItem', 'MTLStructMember', 
'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 'GKTurnBasedParticipant', 'NSURLProtocol', 'NSUserDefaults', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'CKModifyRecordZonesOperation', 'WKPreferences', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'PHContentEditingInputRequestOptions', 'GKMatch', 'CIColor', 'UIDictationPhrase'])
COCOA_PROTOCOLS = set(['SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'MTLComputeCommandEncoder', 'SCNSceneExportDelegate', 'UISearchResultsUpdating', 'MFMailComposeViewControllerDelegate', 'MTLBlitCommandEncoder', 'NSDecimalNumberBehaviors', 'PHContentEditingController', 'NSMutableCopying', 'UIActionSheetDelegate', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UIAdaptivePresentationControllerDelegate', 'GKAchievementViewControllerDelegate', 'UIViewControllerTransitionCoordinator', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'UITableViewDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'AVAudio3DMixing', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'HMAccessoryBrowserDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'CKRecordValue', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'SKSceneDelegate', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'SCNBoundingVolume', 'NSPortDelegate', 'UIContentContainer', 'SCNNodeRendererDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'HMAccessoryDelegate', 'UIPageViewControllerDataSource', 'SCNSceneRendererDelegate', 'SCNPhysicsContactDelegate', 'MKMapViewDelegate', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'MTLRenderCommandEncoder', 'PKPaymentAuthorizationViewControllerDelegate', 'UIToolbarDelegate', 'WKUIDelegate', 'SCNActionable', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'PKPushRegistryDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSLayoutManagerDelegate', 'MTLLibrary', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'MTLResource', 'NSDiscardableContent', 'UITextFieldDelegate', 'MTLBuffer', 'MTLSamplerState', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UISplitViewControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UITraitEnvironment', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'MPPlayableContentDataSource', 'MTLComputePipelineState', 'NSURLSessionDelegate', 'MTLCommandBuffer', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'MTLDepthStencilState', 'GKTurnBasedMatchmakerViewControllerDelegate', 'MPPlayableContentDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'UITextDocumentProxy', 'MTLDrawable', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'AVAudioStereoMixing', 'UIViewControllerContextTransitioning', 
'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'SCNProgramDelegate', 'AVVideoCompositing', 'SCNAnimatable', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'NSExtensionRequestHandling', 'UITextSelecting', 'UIPrinterPickerControllerDelegate', 'NCWidgetProviding', 'MTLCommandEncoder', 'NSURLProtocolClient', 'MFMessageComposeViewControllerDelegate', 'UIVideoEditorControllerDelegate', 'WKNavigationDelegate', 'GKSavedGameListener', 'UITableViewDataSource', 'MTLFunction', 'EKCalendarChooserDelegate', 'NSUserActivityDelegate', 'UICollisionBehaviorDelegate', 'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'HMHomeDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIDocumentPickerDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'SCNSceneRenderer', 'MTLTexture', 'GLKViewDelegate', 'EAAccessoryDelegate', 'WKScriptMessageHandler', 'PHPhotoLibraryChangeObserver', 'NSKeyedUnarchiverDelegate', 'AVPlayerItemMetadataOutputPushDelegate', 'NSMachPortDelegate', 'SCNShadable', 'UIPopoverBackgroundViewMethods', 'UIDocumentMenuDelegate', 'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'AVAudioMixing', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'MTLParallelRenderCommandEncoder', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UIPopoverPresentationControllerDelegate', 'UIDynamicAnimatorDelegate', 'NSTextAttachmentContainer', 'MKAnnotation', 'UIAccessibilityIdentification', 'UICoordinateSpace', 'ABNewPersonViewControllerDelegate', 'MTLDevice', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'HMHomeManagerDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'UISearchControllerDelegate', 'EAWiFiUnconfiguredAccessoryBrowserDelegate', 'UITextInputTraits', 'MTLRenderPipelineState', 'GKVoiceChatClient', 'UIKeyInput', 'UICollectionViewDataSource', 'SCNTechniqueSupport', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'MTLCommandQueue', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'CAMetalDrawable', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'NSURLSessionDataDelegate'])
COCOA_PRIMITIVES = set(['ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', '_GLKMatrix3', '_GLKMatrix2', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'OpaqueCMBlockBuffer', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'AudioFile_SMPTE_Time', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', '__CFBinaryHeap', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', 'SFNTLookupSegment', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 'JustPCDecompositionAction', 'KerxControlPointHeader', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__SecCertificate', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', 'AnchorPoint', 'JustTable', '__CFNetService', 'CF_BRIDGED_TYPE', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'KerxSubtableHeader', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', 'OpaqueJSClass', 'CGGradient', 'OpaqueMIDISetup', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', 'MIDIThruConnectionParams', 'BslnFormat0Part', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', 'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'AnkrTable', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'SCNMatrix4', 'KerxControlPointEntry', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'KernStateEntry', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'AVAudio3DAngularOrientation', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'KernVersion0Header', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 'AudioUnitMeterClipping', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 
'ExtendedNoteOnEvent', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 'TrakTableData', 'GCQuaternion', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'CGImagePropertyOrientation', 'CGPDFScanner', 'OpaqueMusicEventIterator', 'sfntDescriptorHeader', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'BslnFormat1Part', 'sfntFontDescriptor', 'KernSimpleArrayHeader', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGInterpolationQuality', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', '__CTRubyAnnotation', 'SCNVector4', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__GLsync', '__CFStringTokenizer', 'JustWidthDeltaEntry', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', '__CVMetalTextureCache', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 'ScheduledAudioFileRegion', 'STEntryZero', 'AVAudio3DPoint', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', 'CGDataConsumerCallbacks', 'ALMXHeader', 'CGLineCap', 'MIDIControlTransform', 'CGPDFArray', '__SecPolicy', 'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 
'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'AVAudio3DVectorOrientation', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'OpaqueVTCompressionSession', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'VTDecompressionOutputCallbackRecord', 'OpaqueMIDIClient', '__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'OpaqueVTDecompressionSession', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'PropTable', 'KernOrderedListEntry', 'CF_BRIDGED_MUTABLE_TYPE', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'SCNVector3', 'OpaqueAudioConverter', 'MIDIRawData', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', '__SecAccessControl', 'JustPCAction', 'OpaqueVTFrameSilo', 'OpaqueVTMultiPassStorage', 'CGPathElementType', 'AudioFormatListItem', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'sfntCMapHeader', '__CFURLEnumerator', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader'])
if __name__ == '__main__': # pragma: no cover
import os
import re
FRAMEWORKS_PATH = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.1.sdk/System/Library/Frameworks/'
frameworks = os.listdir(FRAMEWORKS_PATH)
all_interfaces = set()
all_protocols = set()
all_primitives = set()
for framework in frameworks:
frameworkHeadersDir = FRAMEWORKS_PATH + framework + '/Headers/'
if not os.path.exists(frameworkHeadersDir):
continue
headerFilenames = os.listdir(frameworkHeadersDir)
        for f in headerFilenames:
            if not f.endswith('.h'):
                continue
            headerFilePath = frameworkHeadersDir + f
            with open(headerFilePath) as header_file:
                content = header_file.read()
            # Raw strings keep the \w escapes valid on newer Pythons.
            all_interfaces.update(re.findall(r'(?<=@interface )\w+', content))
            all_protocols.update(re.findall(r'(?<=@protocol )\w+', content))
            all_primitives.update(re.findall(r'(?<=typedef enum )\w+', content))
            all_primitives.update(re.findall(r'(?<=typedef struct )\w+', content))
            all_primitives.update(re.findall(r'(?<=typedef const struct )\w+', content))
print("ALL interfaces: \n")
print(all_interfaces)
print("\nALL protocols: \n")
print(all_protocols)
print("\nALL primitives: \n")
print(all_primitives)
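    # Illustrative check (not part of the original script): the lookbehind
    # patterns above can be sanity-checked against a small header snippet.
    sample = '''
    @interface UIView : UIResponder
    @protocol UITableViewDelegate
    typedef struct CGPoint CGPoint;
    '''
    assert re.findall(r'(?<=@interface )\w+', sample) == ['UIView']
    assert re.findall(r'(?<=@protocol )\w+', sample) == ['UITableViewDelegate']
    assert re.findall(r'(?<=typedef struct )\w+', sample) == ['CGPoint']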
|
apache-2.0
|
ChenJunor/hue
|
desktop/core/ext-py/pysaml2-2.4.0/src/s2repoze/plugins/challenge_decider.py
|
32
|
3196
|
from paste.request import construct_url
import zope.interface
from repoze.who.interfaces import IRequestClassifier
from paste.httpheaders import REQUEST_METHOD
from paste.httpheaders import CONTENT_TYPE
from paste.httpheaders import USER_AGENT
import re
_DAV_METHODS = (
'OPTIONS',
'PROPFIND',
'PROPPATCH',
'MKCOL',
'LOCK',
'UNLOCK',
'TRACE',
'DELETE',
'COPY',
'MOVE'
)
_DAV_USERAGENTS = (
'Microsoft Data Access Internet Publishing Provider',
'WebDrive',
'Zope External Editor',
'WebDAVFS',
'Goliath',
'neon',
'davlib',
'wsAPI',
'Microsoft-WebDAV'
)
def my_request_classifier(environ):
    """ Returns one of the classifiers 'dav', 'xmlpost', 'soap' or
    'browser', depending on the imperative logic below"""
request_method = REQUEST_METHOD(environ)
if request_method in _DAV_METHODS:
return 'dav'
useragent = USER_AGENT(environ)
if useragent:
for agent in _DAV_USERAGENTS:
if useragent.find(agent) != -1:
return 'dav'
if request_method == 'POST':
if CONTENT_TYPE(environ) == 'text/xml':
return 'xmlpost'
elif CONTENT_TYPE(environ) == "application/soap+xml":
return 'soap'
return 'browser'
zope.interface.directlyProvides(my_request_classifier, IRequestClassifier)
class MyChallengeDecider:
    def __init__(self, path_login=(), path_logout=()):
self.path_login = path_login
self.path_logout = path_logout
def __call__(self, environ, status, _headers):
if status.startswith('401 '):
return True
else:
            if 'samlsp.pending' in environ:
return True
uri = environ.get('REQUEST_URI', None)
if uri is None:
uri = construct_url(environ)
            # Require a challenge for logout, and tell the challenge plugin
            # that this is a logout.
for regex in self.path_logout:
if regex.match(uri) is not None:
environ['samlsp.logout'] = True
return True
            # If the user is already authenticated, don't issue a challenge
            # for anything but logout.
            if 'repoze.who.identity' in environ:
return False
# require a challenge for login
for regex in self.path_login:
if regex.match(uri) is not None:
return True
return False
def make_plugin(path_login=None, path_logout=None):
    if path_login is None:
        raise ValueError(
            'must include path_login in configuration')
    # Build regexps from the strings passed via the config file.
list_login = []
for arg in path_login.splitlines():
carg = arg.lstrip()
if carg != '':
list_login.append(re.compile(carg))
list_logout = []
if path_logout is not None:
for arg in path_logout.splitlines():
carg = arg.lstrip()
if carg != '':
list_logout.append(re.compile(carg))
plugin = MyChallengeDecider(list_login, list_logout)
return plugin
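# Illustrative sketch, not part of the original module: exercising the
# classifier and the challenge decider with a minimal WSGI environ. The
# environ keys are standard WSGI/repoze.who names; the URIs and regexes
# are made up for the example.
if __name__ == '__main__':
    environ = {'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'text/xml'}
    assert my_request_classifier(environ) == 'xmlpost'
    decider = make_plugin(path_login='^/login$', path_logout='^/logout$')
    # A 401 response always triggers a challenge.
    assert decider({'REQUEST_URI': '/private'}, '401 Unauthorized', []) is True
    # An authenticated user browsing a normal page is left alone.
    assert decider({'REQUEST_URI': '/page',
                    'repoze.who.identity': {'repoze.who.userid': 'alice'}},
                   '200 OK', []) is False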
|
apache-2.0
|
jiahaoliang/group-based-policy
|
gbpservice/nfp/configurator/lib/generic_config_constants.py
|
1
|
1089
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers'
SERVICE_TYPE = 'generic_config'
EVENT_CONFIGURE_INTERFACES = 'CONFIGURE_INTERFACES'
EVENT_CLEAR_INTERFACES = 'CLEAR_INTERFACES'
EVENT_CONFIGURE_ROUTES = 'CONFIGURE_ROUTES'
EVENT_CLEAR_ROUTES = 'CLEAR_ROUTES'
EVENT_CONFIGURE_HEALTHMONITOR = 'CONFIGURE_HEALTHMONITOR'
EVENT_CLEAR_HEALTHMONITOR = 'CLEAR_HEALTHMONITOR'
MAX_FAIL_COUNT = 12 # 5 secs delay * 12 = 60 secs
INITIAL = 'initial'
FOREVER = 'forever'
INITIAL_HM_RETRIES = 24 # 5 secs delay * 24 = 120 secs
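# Illustrative sketch, not part of the original module: how a poller might
# consume the retry budgets above. check_health is a hypothetical callable;
# the 5-second delay matches the comments on the constants.
def _example_wait_for_health(check_health, max_retries=INITIAL_HM_RETRIES,
                             delay_secs=5):
    import time
    for _ in range(max_retries):  # e.g. 24 retries * 5 secs = 120 secs
        if check_health():
            return True
        time.sleep(delay_secs)
    return False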
|
apache-2.0
|
epuzanov/ZenPacks.community.HPMon
|
ZenPacks/community/HPMon/HPCPU.py
|
1
|
1291
|
################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010, 2011 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPCPU
HPCPU is an abstraction of a CPU.
$Id: HPCPU.py,v 1.1 2011/01/04 23:09:20 egor Exp $"""
__version__ = "$Revision: 1.1 $"[11:-2]
from Globals import InitializeClass
from Products.ZenModel.CPU import CPU
class HPCPU(CPU):
"""CPU object"""
core = 1
socket = 0
clockspeed = 0
extspeed = 0
voltage = 0
cacheSizeL1 = 0
cacheSizeL2 = 0
_properties = (
{'id':'core', 'type':'int', 'mode':'w'},
{'id':'socket', 'type':'int', 'mode':'w'},
{'id':'clockspeed', 'type':'int', 'mode':'w'}, #MHz
{'id':'extspeed', 'type':'int', 'mode':'w'}, #MHz
{'id':'voltage', 'type':'int', 'mode':'w'}, #Millivolts
{'id':'cacheSizeL1', 'type':'int', 'mode':'w'}, #KBytes
{'id':'cacheSizeL2', 'type':'int', 'mode':'w'}, #KBytes
)
InitializeClass(HPCPU)
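# Illustrative sketch, not part of the original module: HPCPU components are
# normally created by an HPMon modeler plugin from SNMP data; the attribute
# names and units below follow the _properties declarations above, and the
# id is a made-up example.
#
#     cpu = HPCPU('cpu0')
#     cpu.socket = 1
#     cpu.clockspeed = 2400      # MHz
#     cpu.cacheSizeL2 = 1024     # KBytes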
|
gpl-2.0
|
grupoprog3/proyecto_final
|
Entrega Final/flask/Lib/site-packages/openid/yadis/discover.py
|
12
|
6054
|
# -*- test-case-name: openid.test.test_yadis_discover -*-
__all__ = ['discover', 'DiscoveryResult', 'DiscoveryFailure']
from io import StringIO
from openid import fetchers
from openid.yadis.constants import \
YADIS_HEADER_NAME, YADIS_CONTENT_TYPE, YADIS_ACCEPT_HEADER
from openid.yadis.parsehtml import MetaNotFound, findHTMLMeta
class DiscoveryFailure(Exception):
"""Raised when a YADIS protocol error occurs in the discovery process"""
identity_url = None
def __init__(self, message, http_response):
Exception.__init__(self, message)
self.http_response = http_response
class DiscoveryResult(object):
"""Contains the result of performing Yadis discovery on a URI"""
# The URI that was passed to the fetcher
request_uri = None
# The result of following redirects from the request_uri
normalized_uri = None
# The URI from which the response text was returned (set to
# None if there was no XRDS document found)
xrds_uri = None
# The content-type returned with the response_text
content_type = None
# The document returned from the xrds_uri
response_text = None
def __init__(self, request_uri):
"""Initialize the state of the object
sets all attributes to None except the request_uri
"""
self.request_uri = request_uri
def usedYadisLocation(self):
"""Was the Yadis protocol's indirection used?"""
if self.xrds_uri is None:
return False
return self.normalized_uri != self.xrds_uri
def isXRDS(self):
"""Is the response text supposed to be an XRDS document?"""
return (self.usedYadisLocation() or
self.content_type == YADIS_CONTENT_TYPE)
def discover(uri):
"""Discover services for a given URI.
@param uri: The identity URI as a well-formed http or https
URI. The well-formedness and the protocol are not checked, but
the results of this function are undefined if those properties
do not hold.
@return: DiscoveryResult object
@raises Exception: Any exception that can be raised by fetching a URL with
the given fetcher.
@raises DiscoveryFailure: When the HTTP response does not have a 200 code.
"""
result = DiscoveryResult(uri)
resp = fetchers.fetch(uri, headers={'Accept': YADIS_ACCEPT_HEADER})
if resp.status not in (200, 206):
raise DiscoveryFailure(
'HTTP Response status from identity URL host is not 200. '
'Got status %r' % (resp.status,), resp)
# Note the URL after following redirects
result.normalized_uri = resp.final_url
# Attempt to find out where to go to discover the document
# or if we already have it
result.content_type = resp.headers.get('content-type')
result.xrds_uri = whereIsYadis(resp)
if result.xrds_uri and result.usedYadisLocation():
resp = fetchers.fetch(result.xrds_uri)
if resp.status not in (200, 206):
exc = DiscoveryFailure(
'HTTP Response status from Yadis host is not 200. '
'Got status %r' % (resp.status,), resp)
exc.identity_url = result.normalized_uri
raise exc
result.content_type = resp.headers.get('content-type')
result.response_text = resp.body
return result
def whereIsYadis(resp):
"""Given a HTTPResponse, return the location of the Yadis document.
May be the URL just retrieved, another URL, or None if no suitable URL can
be found.
[non-blocking]
@returns: str or None
"""
# Attempt to find out where to go to discover the document
# or if we already have it
content_type = resp.headers.get('content-type')
# According to the spec, the content-type header must be an exact
# match, or else we have to look for an indirection.
if (content_type and
content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
return resp.final_url
else:
# Try the header
yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())
if not yadis_loc:
# Parse as HTML if the header is missing.
#
# XXX: do we want to do something with content-type, like
# have a whitelist or a blacklist (for detecting that it's
# HTML)?
# Decode body by encoding of file
content_type = content_type or ''
encoding = content_type.rsplit(';', 1)
if (len(encoding) == 2 and
encoding[1].strip().startswith('charset=')):
encoding = encoding[1].split('=', 1)[1].strip()
else:
encoding = 'utf-8'
if isinstance(resp.body, bytes):
try:
content = resp.body.decode(encoding)
except UnicodeError:
# All right, the detected encoding has failed. Try with
# UTF-8 (even if there was no detected encoding and we've
# defaulted to UTF-8, it's not that expensive an operation)
try:
content = resp.body.decode('utf-8')
except UnicodeError:
# At this point the content cannot be decoded to a str
# using the detected encoding or falling back to utf-8,
# so we have to resort to replacing undecodable chars.
# This *will* result in broken content but there isn't
# anything else that can be done.
content = resp.body.decode(encoding, 'replace')
else:
content = resp.body
try:
yadis_loc = findHTMLMeta(StringIO(content))
except (MetaNotFound, UnicodeError):
# UnicodeError: Response body could not be encoded and xrds
# location could not be found before troubles occur.
pass
return yadis_loc
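# Illustrative sketch, not part of the original module: a minimal discovery
# round-trip. The URL is hypothetical, and fetchers.fetch performs real HTTP,
# so this only shows how a DiscoveryResult is consumed.
if __name__ == '__main__':
    result = discover('https://example.com/identity')
    print('final URL: %s' % result.normalized_uri)
    print('XRDS found: %s' % result.isXRDS())
    if result.isXRDS():
        print(result.response_text[:200])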
|
apache-2.0
|
AIML/scikit-learn
|
sklearn/datasets/california_housing.py
|
198
|
3877
|
"""California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
    -----
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
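# Illustrative usage sketch, not part of the original module (the shapes
# follow the docstring above; the first call downloads the data from StatLib):
#
# >>> housing = fetch_california_housing()
# >>> housing.data.shape
# (20640, 8)
# >>> housing.target.shape
# (20640,)
# >>> housing.feature_names[:3]
# ['MedInc', 'HouseAge', 'AveRooms']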
|
bsd-3-clause
|
pydsigner/wesnoth
|
data/tools/unit_tree/html_output.py
|
17
|
44265
|
#encoding: utf8
import os, gettext, time, copy, sys, re
import traceback
import unit_tree.helpers as helpers
import wesnoth.wmlparser2 as wmlparser2
pics_location = "../../pics"
html_header = '''
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="stylesheet" href=\"%(path)sstyle.css\" type=\"text/css\"/>
<script type="text/javascript" src="%(path)s/menu.js"></script>
<title>%(title)s</title>
</head>
<body><div>'''.strip()
top_bar = '''
<div class="header">
<a href="http://www.wesnoth.org">
<img src="%(path)swesnoth-logo.jpg" alt="Wesnoth logo"/>
</a>
</div>
<div class="topnav">
<a href="%(path)sindex.html">Wesnoth Units database</a>
</div>'''.strip()
html_footer = '''
<div id="footer">
<p>%(generation_note)s</p>
<p><a href="http://wiki.wesnoth.org/Site_Map">Site map</a></p>
<p><a href="http://www.wesnoth.org/wiki/Wesnoth:Copyrights">Copyright</a> © 2003–2015 The Battle for Wesnoth</p>
<p>Supported by <a href="http://www.jexiste.fr/">Jexiste</a></p>
</div>
</div>
</body></html>
'''.strip()
all_written_html_files = []
error_only_once = {}
def error_message(message):
if message in error_only_once: return
error_only_once[message] = 1
write_error(message)
helpers.error_message = error_message
def reset_errors():
    global error_only_once
    error_only_once = {}
class MyFile:
"""
Python 2 is a bit weird with encodings, really should switch this to
Python 3.
"""
def __init__(self, filename, mode):
self.filename = filename
self.f = open(filename, mode)
def write(self, x):
x = x.encode("utf8")
self.f.write(x)
def close(self):
self.f.close()
class Translation:
def __init__(self, localedir, langcode):
self.catalog = {}
self.localedir = localedir
self.langcode = langcode
class Dummy:
def ugettext(self, x):
if not x: return ""
caret = x.find("^")
if caret < 0: return x
return x[caret + 1:]
self.dummy = Dummy()
def translate(self, string, textdomain):
if textdomain not in self.catalog:
try:
self.catalog[textdomain] = gettext.translation(
textdomain, self.localedir, [self.langcode])
self.catalog[textdomain].add_fallback(self.dummy)
except IOError:
self.catalog[textdomain] = self.dummy
except AttributeError:
self.catalog[textdomain] = self.dummy
except IndexError:
# not sure why, but this happens within the
# gettext.translation call sometimes
self.catalog[textdomain] = self.dummy
r = self.catalog[textdomain].ugettext(string)
return r
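# Illustrative note, not part of the original module: the Dummy fallback
# implements the msgctxt convention used throughout this file, stripping
# everything up to "^" when no catalog is found, so untranslated strings
# still render. With a nonexistent locale dir:
#
#     t = Translation("/nonexistent/locale/dir", "xx_XX")
#     t.translate("unit help^Attacks", "wesnoth-help")  # -> "Attacks"
#     t.translate("Cost: ", "wesnoth-help")             # -> "Cost: "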
class GroupByRace:
def __init__(self, wesnoth, campaign):
self.wesnoth = wesnoth
self.campaign = campaign
def unitfilter(self, unit):
if not self.campaign: return True
return unit.campaigns and self.campaign == unit.campaigns[0]
def groups(self, unit):
return [T(unit.race, "plural_name")]
def group_name(self, group):
if not group: return "None"
return group
class GroupByNothing:
def __init__(self):
pass
def unitfilter(self, unit):
return True
def groups(self, unit):
return ["units"]
def group_name(self, group):
return "units"
class GroupByFaction:
def __init__(self, wesnoth, era):
self.wesnoth = wesnoth
self.era = era
def unitfilter(self, unit):
return self.era in unit.eras
def groups(self, unit):
return [x for x in unit.factions if x[0] == self.era]
def group_name(self, group):
era = self.wesnoth.era_lookup[group[0]]
if group[1]:
faction = era.faction_lookup[group[1]]
name = T(faction, "name")
name = name[name.rfind("=") + 1:]
else:
name = "factionless"
return name
global_htmlout = None
def T(tag, att):
if not tag: return "none"
return tag.get_text_val(att, translation = global_htmlout.translate)
class HTMLOutput:
def __init__(self, isocode, output, addon, campaign, is_era, wesnoth, verbose = False):
global global_htmlout
self.output = output
self.addon = addon
self.campaign = campaign
self.is_era = is_era
self.verbose = verbose
self.target = "index.html"
self.wesnoth = wesnoth
self.forest = None
self.translation = Translation(options.transdir, isocode)
self.isocode = isocode
global_htmlout = self
def translate(self, string, domain):
return self.translation.translate(string, domain)
def analyze_units(self, grouper, add_parents):
"""
This takes all units belonging to a campaign, then groups them either
by race or faction, and creates an advancements tree out of it.
"""
# Build an advancement tree forest of all units.
forest = self.forest = helpers.UnitForest()
units_added = {}
for uid, u in self.wesnoth.unit_lookup.items():
if u.hidden: continue
if grouper.unitfilter(u):
forest.add_node(helpers.UnitNode(u))
units_added[uid] = u
#print(" %d/%d units" % (len(units_added), len(self.wesnoth.unit_lookup)))
# Always add any child units, even if they have been filtered out..
while units_added:
new_units_added = {}
for uid, u in units_added.items():
for auid in u.advance:
if not auid in units_added:
try:
au = self.wesnoth.unit_lookup[auid]
except KeyError:
error_message(
"Warning: Unit %s not found as advancement of %s\n" %
(auid, repr(uid)))
continue
forest.add_node(helpers.UnitNode(au))
new_units_added[auid] = au
units_added = new_units_added
if add_parents:
# Also add parent units
added = True
while added:
added = False
for uid, u in self.wesnoth.unit_lookup.items():
if uid in forest.lookup: continue
for auid in u.advance:
if auid in forest.lookup:
forest.add_node(helpers.UnitNode(u))
added = True
break
forest.update()
# Partition trees by race/faction of first unit.
groups = {}
breadth = 0
for tree in forest.trees.values():
u = tree.unit
ugroups = grouper.groups(u)
for group in ugroups:
groups[group] = groups.get(group, []) + [tree]
breadth += tree.breadth
thelist = groups.keys()
thelist.sort(key = lambda x: grouper.group_name(x))
rows_count = breadth + len(thelist)
# Create empty grid.
rows = []
for j in xrange(rows_count):
column = []
for i in xrange(6):
column.append((1, 1, None))
rows.append(column)
# Sort advancement trees by name of first unit and place into the grid.
def by_name(t1, t2):
u1 = t1.unit
u2 = t2.unit
u1name = T(u1, "name")
u2name = T(u2, "name")
return cmp(u1name, u2name)
def grid_place(nodes, x):
nodes.sort(by_name)
for node in nodes:
level = node.unit.level
if level < 0: level = 0
if level > 5: level = 5
rows[x][level] = (1, node.breadth, node)
for i in xrange(1, node.breadth):
rows[x + i][level] = (0, 0, node)
grid_place(node.children, x)
x += node.breadth
return x
x = 0
for group in thelist:
node = helpers.GroupNode(group)
node.name = grouper.group_name(group)
rows[x][0] = (6, 1, node)
for i in xrange(1, 6):
rows[x][i] = (0, 0, None)
nodes = groups[group]
x += 1
x = grid_place(nodes, x)
self.unitgrid = rows
return len(forest.lookup)
def write_navbar(self, report_type):
def write(x): self.output.write(x)
all_written_html_files.append((self.isocode, self.output.filename))
languages = self.wesnoth.languages_found
langlist = languages.keys()
langlist.sort()
write(top_bar % {"path" : "../../"})
write("""
<div class="navbar">
""")
write("<ul class=\"navbar\">")
        def abbrev(name):
            abbrev = name[0]
            word_separators = [" ", "_", "+", "(", ")"]
            for i in xrange(1, len(name)):
                if name[i] in ["+", "(", ")"] or (name[i - 1] in word_separators and name[i] not in word_separators):
                    abbrev += name[i]
            return abbrev
def add_menu(id, name, class2=""):
write("""<li class="popuptrigger"
onclick="toggle_menu(this, '""" + id + """', 2)"
onmouseover="toggle_menu(this, '""" + id + """', 1)"
onmouseout="toggle_menu(this, '""" + id + """', 0)">""")
write('<a class="' + class2 + '">' + name + "</a>")
write('<div class="popupmenu" id="' + id + '">')
write("<div>" + name + "</div>")
# We may not have all the required info yet so defer writing the
# campaigns/eras navigation.
# Campaigns
x = self.translate("addon_type^Campaign", "wesnoth")
add_menu("campaigns_menu", x)
write("PLACE CAMPAIGNS HERE\n")
write("</div></li>\n")
# Eras
x = self.translate("Era", "wesnoth")
add_menu("eras_menu", x)
write("PLACE ERAS HERE\n")
write("</div></li>\n")
# Races / Factions
target = self.target
if self.campaign == "units":
target = "mainline.html"
if not self.is_era:
x = self.translate("Race", "wesnoth-lib")
add_menu("races_menu", x)
write("<a href=\"mainline.html\">%s</a><br/>\n" % (
self.translate("all", "wesnoth-editor")))
r = {}, {}
for u in self.wesnoth.unit_lookup.values():
race = u.race
racename = T(race, "plural_name")
                m = 1
                # Mainline races sort first; add-on races go after the "-" separator.
                if "mainline" in u.campaigns:
                    m = 0
r[m][racename] = race.get_text_val("id") if race else "none"
racenames = sorted(r[0].items())
if r[1].items():
racenames += [("-", "-")] + sorted(r[1].items())
for racename, rid in racenames:
if racename == "-":
write(" -<br/>")
else:
write(" <a href=\"%s#%s\">%s</a><br/>" % (
target, racename, racename))
write("</div></li>\n")
else:
x = self.translate("Factions", "wesnoth-help")
add_menu("races_menu", x)
for row in self.unitgrid:
for column in xrange(6):
hspan, vspan, un = row[column]
if not un: continue
if isinstance(un, helpers.GroupNode):
html = "../%s/%s.html" % (
self.isocode, self.campaign)
write(" <a href=\"%s#%s\">%s</a><br/>" % (
html, un.name, un.name))
write("</div></li>\n")
# Add entries for the races also to the navbar itself.
if not self.is_era:
class Entry: pass
races = {}
for uid, u in self.wesnoth.unit_lookup.items():
if self.campaign != "units":
if self.campaign not in u.campaigns: continue
if u.race:
racename = T(u.race, "plural_name")
else:
racename = "none"
runits = races.get(racename, [])
runits.append(uid)
races[racename] = runits
racelist = sorted(races.keys())
got_menu = False
menuid = 0
for r in racelist:
if not r: continue
if got_menu: write("</div></li>\n")
add_menu("units_menu" + str(menuid), r, "unitmenu")
menuid += 1
got_menu = True
c = self.campaign
if c == "units": c = "mainline"
write("<a href=\"%s#%s\">%s</a><br/>" % (
target, r, r))
for uid in races[r]:
un = self.wesnoth.unit_lookup[uid]
if un.hidden: continue
if "mainline" in un.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, uid)
name = self.wesnoth.get_unit_value(un,
"name", translation=self.translation.translate)
if not name:
error_message("Warning: Unit uid=" + uid + " has no name.\n")
name = uid
write("<a href=\"" + link + "\">" + name + "</a><br />")
if got_menu: write("</div></li>\n")
# Languages
x = self.translate("Language", "wesnoth")
add_menu("languages_menu", x)
col = 0
maxcol = len(langlist) - 1
write("<table>")
write("<tr>")
for lang in langlist:
col += 1
write("<td>")
labb = lang
#underscore = labb.find("_")
#if underscore > 0: labb = labb[:underscore]
if self.addon == "mainline":
write(" <a title=\"%s\" href=\"../%s/%s\">%s</a><br/>\n" % (
languages[lang], lang, self.target,
labb))
else:
write(" <a title=\"%s\" href=\"../%s/%s\">%s</a><br/>\n" % (
languages[lang], lang, "mainline.html",
labb))
write("</td>")
if col % 5 == 0:
if col < maxcol: write("</tr><tr>")
write("</tr>")
write("</table>")
write("</div></li>\n")
write("<li><div> </div></li>")
write("<li><div> </div></li>")
write('<li><a class="unitmenu" href="../../overview.html">Overview</a></li>')
write("</ul>\n")
write("</div>\n")
def pic(self, u, x, recursion = 0):
if recursion >= 4:
error_message(
"Warning: Cannot find image for unit %s(%s).\n" % (
u.get_text_val("id"), x.name))
return None, None
image = self.wesnoth.get_unit_value(x, "image")
portrait = x.get_all(tag="portrait")
if not portrait:
bu = self.wesnoth.get_base_unit(u)
if bu:
portrait = bu.get_all(tag="portrait")
if portrait:
portrait = portrait[0].get_text_val("image")
if not image:
if x.name == "female":
baseunit = self.wesnoth.get_base_unit(u)
if baseunit:
female = baseunit.get_all(tag="female")
return self.pic(u, female[0], recursion = recursion + 1)
else:
return self.pic(u, u, recursion = recursion + 1)
error_message(
"Warning: Missing image for unit %s(%s).\n" % (
u.get_text_val("id"), x.name))
return None, None
icpic = image_collector.add_image_check(self.addon, image)
if not icpic.ipath:
error_message("Warning: No picture %s for unit %s.\n" %
(image, u.get_text_val("id")))
picname = icpic.id_name
image = os.path.join(pics_location, picname)
if portrait:
picname = image_collector.add_image(self.addon, portrait,
no_tc=True)
portrait = os.path.join(pics_location, picname)
return image, portrait
def get_abilities(self, u):
anames = []
already = {}
for abilities in u.get_all(tag="abilities"):
try: c = abilities.get_all()
except AttributeError: c = []
for ability in c:
id = ability.get_text_val("id")
if id in already: continue
already[id] = True
name = T(ability, "name")
if not name: name = id
if not name: name = ability.name
anames.append(name)
return anames
def get_recursive_attacks(self, this_unit):
def copy_attributes(copy_from, copy_to):
for c in copy_from.data:
if isinstance(c, wmlparser2.AttributeNode):
copy_to.data.append(c)
# Use attacks of base_units as base, if we have one.
base_unit = self.wesnoth.get_base_unit(this_unit)
attacks = []
if base_unit:
attacks = copy.deepcopy(self.get_recursive_attacks(base_unit))
base_attacks_count = len(attacks)
for i, attack in enumerate(this_unit.get_all(tag="attack")):
# Attack merging is order based.
if i < base_attacks_count:
copy_attributes(attack, attacks[i])
else:
attacks.append(attack)
return attacks
def write_units(self):
def write(x): self.output.write(x)
def _(x, c="wesnoth"): return self.translate(x, c)
rows = self.unitgrid
write("<table class=\"units\">\n")
write("<colgroup>")
for i in xrange(6):
write("<col class=\"col%d\" />" % i)
write("</colgroup>")
pic = image_collector.add_image("general",
"../../../images/misc/leader-crown.png", no_tc=True)
crownimage = os.path.join(pics_location, pic)
ms = None
for row in xrange(len(rows)):
write("<tr>\n")
for column in xrange(6):
hspan, vspan, un = rows[row][column]
if vspan:
attributes = ""
if hspan == 1 and vspan == 1:
pass
elif hspan == 1:
attributes += " rowspan=\"%d\"" % vspan
elif vspan == 1:
attributes += " colspan=\"%d\"" % hspan
if un and isinstance(un, helpers.GroupNode):
# Find the current multiplayer side so we can show the
# little crowns..
ms = None
if self.is_era:
try:
eid, fid = un.data
era = self.wesnoth.era_lookup[eid]
if fid:
ms = era.faction_lookup[fid]
except TypeError:
pass
racename = un.name
attributes += " class=\"raceheader\""
write("<td%s>" % attributes)
write("<a name=\"%s\">%s</a>" % (racename, racename))
write("</td>\n")
elif un:
u = un.unit
attributes += " class=\"unitcell\""
write("<td%s>" % attributes)
uid = u.get_text_val("id")
def uval(name):
return self.wesnoth.get_unit_value(u, name,
translation=self.translation.translate)
name = uval("name")
cost = uval("cost")
hp = uval("hitpoints")
mp = uval("movement")
xp = uval("experience")
level = uval("level")
crown = ""
if ms:
if un.id in ms.units:
crown = u" ♟"
if un.id in ms.is_leader:
crown = u" ♚"
uaddon = "mainline"
if "mainline" not in u.campaigns: uaddon = self.addon
link = "../../%s/%s/%s.html" % (uaddon, self.isocode, uid)
write("<div class=\"i\"><a href=\"%s\" title=\"id=%s\">%s</a>" % (
link, uid, u"i"))
write("</div>")
write("<div class=\"l\">L%s%s</div>" % (level, crown))
write("<a href=\"%s\">%s</a><br/>" % (link, name))
write('<div class="pic">')
image, portrait = self.pic(u, u)
write('<a href=\"%s\">' % link)
if crown == u" ♚":
write('<div style="background: url(%s)">' % image)
write('<img src="%s" alt="(image)" />' % crownimage)
write("</div>")
else:
write('<img src="%s" alt="(image)" />' % image)
write('</a>\n</div>\n')
write("<div class=\"attributes\">")
write("%s%s<br />" % (_("Cost: ", "wesnoth-help"), cost))
write("%s%s<br />" % (_("HP: "), hp))
write("%s%s<br />" % (_("MP: "), mp))
write("%s%s<br />" % (_("XP: "), xp))
# Write info about abilities.
anames = self.get_abilities(u)
if anames:
write("\n<div style=\"clear:both\">")
write(", ".join(anames))
write("</div>")
# Write info about attacks.
write("\n<div style=\"clear:both\">")
attacks = self.get_recursive_attacks(u)
for attack in attacks:
n = T(attack, "number")
x = T(attack, "damage")
x = "%s - %s" % (x, n)
write("%s " % x)
r = T(attack, "range")
t = T(attack, "type")
write("%s (%s)" % (_(r), _(t)))
s = []
specials = attack.get_all(tag="specials")
if specials:
for special in specials[0].get_all(tag=""):
sname = T(special, "name")
if sname:
s.append(sname)
s = ", ".join(s)
if s: write(" (%s)" % s)
write("<br />")
write("</div>")
write("</div>")
write("</td>\n")
else:
write("<td class=\"empty\"></td>")
write("</tr>\n")
write("</table>\n")
def write_units_tree(self, grouper, title, add_parents):
self.output.write(html_header % {"path": "../../",
"title": title})
n = self.analyze_units(grouper, add_parents)
self.write_navbar("units_tree")
self.output.write("<div class=\"main\">")
self.output.write("<h1>%s</h1>" % title)
self.write_units()
self.output.write('<div id="clear" style="clear:both;"></div>')
self.output.write("</div>")
self.output.write(html_footer % {
"generation_note": "generated on " + time.ctime()})
return n
def write_unit_report(self, output, unit):
def write(x): self.output.write(x)
def _(x, c="wesnoth"): return self.translate(x, c)
def find_attr(what, key):
if unit.movetype:
mtx = unit.movetype.get_all(tag=what)
mty = None
if mtx:
mty = mtx[0].get_text_val(key)
x = unit.get_all(tag=what)
y = None
if x:
y = x[0].get_text_val(key,
translation=self.translation.translate)
if y:
return True, y
if unit.movetype and mty != None:
return False, mty
return False, "-"
def uval(name):
return self.wesnoth.get_unit_value(unit, name,
translation=self.translation.translate)
# Write unit name, picture and description.
uid = unit.get_text_val("id")
uname = uval("name")
display_name = uname
self.output = output
write(html_header % {"path": "../../",
"title": display_name})
self.write_navbar("unit_report")
self.output.write("<div class=\"main\">")
female = unit.get_all(tag="female")
if female:
fname = T(female[0], "name")
if fname and fname != uname:
display_name += "<br/>" + fname
write('<div class="unit-columns">')
write('<div class="unit-column-left">')
write("<h1>%s</h1>\n" % display_name)
write('<div class="pic">')
if female:
mimage, portrait = self.pic(unit, unit)
fimage, fportrait = self.pic(unit, female[0])
if not fimage: fimage = mimage
if not fportrait: fportrait = portrait
write('<img src="%s" alt="(image)" />\n' % mimage)
write('<img src="%s" alt="(image)" />\n' % fimage)
image = mimage
else:
image, portrait = self.pic(unit, unit)
write('<img src="%s" alt="(image)" />\n' % image)
write('</div>\n')
description = uval("description")
# TODO: what is unit_description?
if not description: description = uval("unit_description")
if not description: description = "-"
write("<p>%s</p>\n" % re.sub("\n", "\n<br />", description))
# Base info.
hp = uval("hitpoints")
mp = uval("movement")
xp = uval("experience")
vision = uval("vision")
jamming = uval("jamming")
level = uval("level")
alignment = uval("alignment")
write("<h2>Information</h2>\n")
write("<table class=\"unitinfo\">\n")
write("<tr>\n")
write("<td>%s" % _("Advances from: ", "wesnoth-help"))
write("</td><td>\n")
for pid in self.forest.get_parents(uid):
punit = self.wesnoth.unit_lookup[pid]
if "mainline" in unit.campaigns and "mainline" not in punit.campaigns:
continue
if "mainline" in unit.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, pid)
name = self.wesnoth.get_unit_value(punit, "name",
translation=self.translation.translate)
write("\n<a href=\"%s\">%s</a>" % (link, name))
write("</td>\n")
write("</tr><tr>\n")
write("<td>%s" % _("Advances to: ", "wesnoth-help"))
write("</td><td>\n")
for cid in self.forest.get_children(uid):
try:
cunit = self.wesnoth.unit_lookup[cid]
if "mainline" in cunit.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, cid)
if "mainline" in unit.campaigns and "mainline" not in cunit.campaigns:
continue
name = self.wesnoth.get_unit_value(cunit, "name",
translation=self.translation.translate)
except KeyError:
error_message("Warning: Unit %s not found.\n" % cid)
name = cid
if "mainline" in unit.campaigns: continue
link = self.target
write("\n<a href=\"%s\">%s</a>" % (link, name))
write("</td>\n")
write("</tr>\n")
for val, text in [
("cost", _("Cost: ", "wesnoth-help")),
("hitpoints", _("HP: ")),
("movement", _("Movement", "wesnoth-help") + ": "),
("vision", _("Vision", "wesnoth-help") + ": "),
("jamming", _("Jamming", "wesnoth-help") + ":"),
("experience", _("XP: ")),
("level", _("Level") + ": "),
("alignment", _("Alignment: ")),
("id", "ID")]:
x = uval(val)
if not x and val in ("jamming", "vision"): continue
if val == "alignment": x = _(x)
write("<tr>\n")
write("<td>%s</td>" % text)
write("<td class=\"val\">%s</td>" % x)
write("</tr>\n")
# Write info about abilities.
anames = self.get_abilities(unit)
write("<tr>\n")
write("<td>%s</td>" % _("Abilities: ", "wesnoth-help"))
write("<td class=\"val\">" + (", ".join(anames)) + "</td>")
write("</tr>\n")
write("</table>\n")
# Write info about attacks.
write("<h2>" + _("unit help^Attacks", "wesnoth-help") + " <small>(damage - count)</small></h2> \n")
write("<table class=\"unitinfo attacks\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /></colgroup>')
attacks = self.get_recursive_attacks(unit)
for attack in attacks:
write("<tr>")
aid = attack.get_text_val("name")
aname = T(attack, "description")
icon = attack.get_text_val("icon")
if not icon:
icon = "attacks/%s.png" % aid
image_add = image_collector.add_image_check(self.addon,
icon, no_tc = True)
if not image_add.ipath:
error_message("Error: No attack icon '%s' found for '%s'.\n" % (
icon, uid))
icon = os.path.join(pics_location, "unit$elves-wood$shaman.png")
else:
icon = os.path.join(pics_location, image_add.id_name)
write("<td><img src=\"%s\" alt=\"(image)\"/></td>" % icon)
write("<td><b>%s</b>" % aname)
r = T(attack, "range")
write("<br/>%s</td>" % _(r))
n = attack.get_text_val("number")
x = attack.get_text_val("damage")
x = "%s - %s" % (x, n)
write("<td><i>%s</i>" % x)
t = T(attack, "type")
write("<br/>%s</td>" % _(t))
s = []
specials = attack.get_all(tag="specials")
if specials:
for special in specials[0].get_all(tag=""):
sname = T(special, "name")
if sname:
s.append(sname)
else:
error_message(
"Warning: Weapon special %s has no name for %s.\n" % (
special.name, uid))
s = "<br/>".join(s)
write("<td>%s</td>" % s)
write("</tr>")
write("</table>\n")
# Write info about resistances.
resistances = [
("blade", "attacks/sword-human.png"),
("pierce", "attacks/spear.png"),
("impact", "attacks/club.png"),
("fire", "attacks/fireball.png"),
("cold", "attacks/iceball.png"),
("arcane", "attacks/faerie-fire.png")]
write("<h2>%s</h2>\n" % _("Resistances: ").strip(" :"))
write("<table class=\"unitinfo resistances\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /><col class="col5" /><col class="col6" /><col class="col7" /></colgroup>')
write("<tr>\n")
write("</tr>\n")
row = 0
for rid, ricon in resistances:
special, r = find_attr("resistance", rid)
if r == "-": r = 100
try: r = "<i>%d%%</i>" % (100 - int(r))
except ValueError:
error_message("Warning: Invalid resistance %s for %s.\n" % (
r, uid))
rcell = "td"
if special: rcell += ' class="special"'
if row % 2 == 0: write("<tr>\n")
else: write("<td></td>")
picname = image_collector.add_image(self.addon, ricon,
no_tc = True)
icon = os.path.join(pics_location, picname)
write("<td><img src=\"%s\" alt=\"(icon)\" /></td>\n" % (icon, ))
write("<th>%s</th><td class=\"num\">%s</td>\n" % (_(rid), r))
if row % 2 == 1: write("</tr>\n")
row += 1
write("</table>\n")
# end left column
write('</div>')
write('<div class="unit-column-right">')
for si in xrange(2):
if si and not female: break
if si:
sportrait = fportrait
simage = fimage
else:
simage = image
sportrait = portrait
style = "background-image: url(%s);" % simage
write('<div class="portrait">')
write('<div style="%s"> </div>' % style)
if portrait:
write('<img src="%s" alt="(portrait)" />\n' % sportrait)
write('</div>')
# Write info about movement costs and terrain defense.
write("<h2>" + _("Terrain", "wesnoth-help") + "</h2>\n")
write("<table class=\"unitinfo terrain\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /></colgroup>')
write("<tr><th colspan=\"2\"></th><th colspan=\"2\">%s</th></tr>\n" % (
_("Movement Cost", "wesnoth-help")))
write("<tr><th colspan=\"2\">%s</th><th></th><th class=\"numheader\">%s</th></tr>\n" % (
_("Terrain", "wesnoth-help"), _("Defense", "wesnoth-help")))
terrains = self.wesnoth.terrain_lookup
terrainlist = []
already = {}
for tstring, t in terrains.items():
tid = t.get_text_val("id")
if tid in ["off_map", "off_map2", "fog", "shroud", "impassable",
"void", "rails"]: continue
if t.get_all(att="aliasof"): continue
if tid in already: continue
already[tid] = 1
name = T(t, "name")
ticon = t.get_text_val("symbol_image")
if not ticon:
ticon = t.get_text_val("icon_image")
# Use nice images for known mainline terrain types
if tid == "fungus": ticon = "forest/mushrooms-tile"
elif tid == "cave": ticon = "cave/floor6"
elif tid == "sand": ticon = "sand/beach"
elif tid == "reef": ticon = "water/reef-tropical-tile"
elif tid == "hills": ticon = "hills/regular"
elif tid == "swamp_water": ticon = "swamp/water-tile"
elif tid == "shallow_water": ticon = "water/coast-tile"
elif tid == "castle": ticon = "castle/castle-tile"
elif tid == "mountains": ticon = "mountains/snow-tile"
elif tid == "deep_water": ticon = "water/ocean-tile"
elif tid == "flat": ticon = "grass/green-symbol"
elif tid == "forest": ticon = "forest/pine-tile"
elif tid == "frozen": ticon = "frozen/ice"
elif tid == "village": ticon = "village/human-tile"
elif tid == "impassable": ticon = "void/void"
elif tid == "unwalkable": ticon = "unwalkable/lava"
elif tid == "rails": ticon = "misc/rails-ne-sw"
if ticon:
terrainlist.append((name, tid, ticon))
else:
error_message("Terrain " + tid + " has no symbol_image\n")
terrainlist.sort()
for tname, tid, ticon in terrainlist:
not_from_race, c = find_attr("movement_costs", tid)
ccell = "td"
if c == "99": ccell += ' class="grayed"'
dcell = "td"
not_from_race, d = find_attr("defense", tid)
if d == "-": d = 100
try:
d = int(d)
                # negative defense has something to do with best defense if
                # there are multiple terrain types
if d < 0: d = -d
d = "%d%%" % (100 - d)
except ValueError:
error_message("Warning: Invalid defense %s for %s.\n" % (
d, uid))
write("<tr>\n")
picname = image_collector.add_image(self.addon,
"terrain/" + ticon + ".png", no_tc=True)
icon = os.path.join(pics_location, picname)
write("<td><img src=\"%s\" alt=\"(icon)\" /></td>\n" % (icon, ))
write("<td>%s</td><%s><i>%s</i></td><%s class=\"num\"><i>%s</i></td>\n" % (
tname, ccell, c, dcell, d))
write("</tr>\n")
write("</table>\n")
write('</div>') # right column
write('</div>') # columns parent
self.output.write('<div id="clear" style="clear:both;"></div>')
write('</div>') # main
self.output.write(html_footer % {
"generation_note": "generated on " + time.ctime()})
def generate_campaign_report(addon, isocode, campaign, wesnoth):
if campaign:
cid = campaign.get_text_val("id")
else:
cid = "mainline"
if not cid: cid = addon + "_" + campaign.get_text_val("define")
print("campaign " + addon + " " + cid + " " + isocode)
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
output = MyFile(os.path.join(path, "%s.html" % cid), "w")
html = HTMLOutput(isocode, output, addon, cid, False, wesnoth)
html.target = "%s.html" % cid
grouper = GroupByRace(wesnoth, cid)
if campaign:
title = campaign.get_text_val("name", translation = html.translate)
else:
title = html.translate("Units", "wesnoth-help")
if not title:
title = cid
n = html.write_units_tree(grouper, title, True)
output.close()
return n
def generate_era_report(addon, isocode, era, wesnoth):
eid = era.get_text_val("id")
print("era " + addon + " " + eid + " " + isocode)
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
output = MyFile(os.path.join(path, "%s.html" % eid), "w")
html = HTMLOutput(isocode, output, addon, eid, True, wesnoth)
html.target = "%s.html" % eid
grouper = GroupByFaction(wesnoth, eid)
ename = era.get_text_val("name", translation = html.translate)
n = html.write_units_tree(grouper, ename, False)
output.close()
return n
def generate_single_unit_reports(addon, isocode, wesnoth):
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
html = HTMLOutput(isocode, None, addon, "units", False, wesnoth)
grouper = GroupByNothing()
html.analyze_units(grouper, True)
for uid, unit in wesnoth.unit_lookup.items():
if unit.hidden: continue
if "mainline" in unit.campaigns and addon != "mainline": continue
try:
htmlname = u"%s.html" % uid
filename = os.path.join(path, htmlname).encode("utf8")
# We probably can come up with something better.
if os.path.exists(filename):
age = time.time() - os.path.getmtime(filename)
# was modified in the last 12 hours - we should be ok
if age < 3600 * 12: continue
except (UnicodeDecodeError, UnicodeEncodeError) as e:
traceback.print_exc()
error_message("Unicode problem: " + repr(path) + " + " + repr(uid) + "\n")
error_message(str(e) + "\n")
continue
output = MyFile(filename, "w")
html.target = "%s.html" % uid
html.write_unit_report(output, unit)
output.close()
def html_postprocess_file(filename, isocode, batchlist):
print(u"postprocessing " + repr(filename))
chtml = u""
ehtml = u""
cids = [[], []]
for addon in batchlist:
for campaign in addon.get("campaigns", []):
if campaign["units"] == "?": continue
if campaign["units"] <= 0: continue
if addon["name"] == "mainline": lang = isocode
else: lang = "en_US"
c = addon["name"], campaign["id"], campaign["translations"].get(
lang, campaign["name"]), lang
if addon["name"] == "mainline":
cids[0].append(c)
else:
cids[1].append(c)
for i in xrange(2):
campaigns = cids[i]
campaigns.sort(key = lambda x: "A" if x[1] == "mainline" else "B" + x[2])
for campaign in campaigns:
addon, cname, campname, lang = campaign
chtml += u" <a title=\"%s\" href=\"../../%s/%s/%s.html\">%s</a><br/>\n" % (
campname, addon, lang, cname, campname)
if i == 0 and cids[1]:
chtml += u"-<br/>\n"
eids = [[], []]
for addon in batchlist:
for era in addon.get("eras", []):
if era["units"] == "?": continue
if era["units"] <= 0: continue
if addon["name"] == "mainline": lang = isocode
else: lang = "en_US"
e = addon["name"], era["id"], era["translations"].get(
lang, era["name"]), lang
if addon["name"] == "mainline":
eids[0].append(e)
else:
eids[1].append(e)
for i in xrange(2):
eras = eids[i]
eras.sort(key = lambda x: x[2])
for era in eras:
addon, eid, eraname, lang = era
ehtml += u" <a title=\"%s\" href=\"../../%s/%s/%s.html\">%s</a><br/>" % (
eraname, addon, lang, eid, eraname)
if i == 0 and eids[1]:
ehtml += u"-<br/>\n"
f = open(filename, "r+b")
html = f.read().decode("utf8")
html = html.replace(u"PLACE CAMPAIGNS HERE\n", chtml)
html = html.replace(u"PLACE ERAS HERE\n", ehtml)
    f.seek(0)
    f.write(html.encode("utf8"))
    # Truncate in case the substituted HTML is shorter than the original.
    f.truncate()
    f.close()
def html_postprocess_all(batchlist):
for isocode, filename in all_written_html_files:
html_postprocess_file(filename, isocode, batchlist)
def write_index(out_path):
output = MyFile(os.path.join(out_path, "index.html"), "w")
output.write("""
<html><head>
<meta http-equiv="refresh" content="0;url=mainline/en_US/mainline.html">
</head>
<body>
<a href="mainline/en_US/mainline.html">Redirecting to Wesnoth units database...</a>
</body>
</html>
""")
|
gpl-2.0
|
dataDogma/Computer-Science
|
Django-MVP-page/venv/Lib/encodings/iso8859_1.py
|
266
|
13176
|
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
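### Usage sketch (illustrative addition, not gencodec output): round-trip a
### string through the tables built above with the charmap primitives.
if __name__ == '__main__':
    data, _ = codecs.charmap_encode('caf\xe9', 'strict', encoding_table)
    text, _ = codecs.charmap_decode(data, 'strict', decoding_table)
    assert text == 'caf\xe9'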
|
gpl-3.0
|
IrvinT/webapp
|
node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/ordered_dict.py
|
2354
|
10366
|
# Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
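# --- Usage sketch (illustrative; not part of the recipe) ---
if __name__ == '__main__':
    od = OrderedDict([('b', 1), ('a', 2)])
    assert od.keys() == ['b', 'a']             # insertion order is preserved
    od['c'] = 3
    assert od.popitem() == ('c', 3)            # LIFO pop from the end
    assert od.popitem(last=False) == ('b', 1)  # FIFO pop from the front
    assert od.items() == [('a', 2)]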
|
mit
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/setuptools-0.6c11/setuptools/command/install_egg_info.py
|
39
|
3724
|
from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, shutil, pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name()+'.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
target = self.target
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink,(self.target,),"Removing "+self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(self.copytree, (),
"Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src,dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/','CVS/':
if src.startswith(skip) or '/'+skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp: return
filename,ext = os.path.splitext(self.target)
filename += '-nspkg.pth'; self.outputs.append(filename)
log.info("Installing %s",filename)
if not self.dry_run:
f = open(filename,'wb')
for pkg in nsp:
pth = tuple(pkg.split('.'))
trailer = '\n'
if '.' in pkg:
trailer = (
"; m and setattr(sys.modules[%r], %r, m)\n"
% ('.'.join(pth[:-1]), pth[-1])
)
f.write(
"import sys,new,os; "
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
"*%(pth)r); "
"ie = os.path.exists(os.path.join(p,'__init__.py')); "
"m = not ie and "
"sys.modules.setdefault(%(pkg)r,new.module(%(pkg)r)); "
"mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
"(p not in mp) and mp.append(p)%(trailer)s"
% locals()
)
f.close()
def _get_all_ns_packages(self):
nsp = {}
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp['.'.join(pkg)] = 1
pkg.pop()
nsp=list(nsp)
nsp.sort() # set up shorter names first
return nsp
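# --- Usage sketch (illustrative; not part of setuptools) ---
# The command normally runs as a subcommand of `install`, but it can also be
# invoked directly; `--install-dir` is the option declared in user_options above:
#
#   python setup.py install_egg_info --install-dir=build/site-packages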
|
bsd-3-clause
|
DreamerKing/LightweightHtmlWidgets
|
LightweightHtmlWidgets/bin/Debug/Ipy.Lib/encodings/cp500.py
|
593
|
13377
|
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
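### Usage sketch (illustrative addition, not gencodec output): EBCDIC round
### trip through the Codec defined above.
if __name__ == '__main__':
    encoded, _ = Codec().encode(u'HELLO')
    assert encoded != 'HELLO'  # cp500 is EBCDIC, so the bytes differ from ASCII
    decoded, _ = Codec().decode(encoded)
    assert decoded == u'HELLO'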
|
gpl-3.0
|
skosukhin/spack
|
lib/spack/docs/tutorial/examples/Autotools/1.package.py
|
1
|
2197
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Mpileaks(AutoToolsPackage):
"""Tool to detect and report leaked MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
variant("stackstart", values=int, default=0,
description="Specify the number of stack frames to truncate")
depends_on("mpi")
depends_on("adept-utils")
depends_on("callpath")
def configure_args(self):
stackstart = int(self.spec.variants['stackstart'].value)
args = ["--with-adept-utils=" + spec['adept-utils'].prefix,
"--with-callpath=" + spec['callpath'].prefix]
if stackstart:
args.extend(['--with-stack-start-c=%s' % stackstart,
'--with-stack-start-fortran=%s' % stackstart])
return args
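# Illustrative only (hypothetical spec): with the definition above,
#   spack install [email protected] stackstart=4
# would produce configure arguments along the lines of
#   --with-adept-utils=<adept-utils prefix> --with-callpath=<callpath prefix>
#   --with-stack-start-c=4 --with-stack-start-fortran=4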
|
lgpl-2.1
|
kjw0106/GCM_app_server
|
venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py
|
470
|
2025
|
# This is just a kludge so that bdist_rpm doesn't guess wrong about the
# distribution name and version, if the egg_info command is going to alter
# them, another kludge to allow you to build old-style non-egg RPMs, and
# finally, a kludge to track .rpm files for uploading when run on Python <2.5.
from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
import sys, os
class bdist_rpm(_bdist_rpm):
def initialize_options(self):
_bdist_rpm.initialize_options(self)
self.no_egg = None
if sys.version<"2.5":
# Track for uploading any .rpm file(s) moved to self.dist_dir
def move_file(self, src, dst, level=1):
_bdist_rpm.move_file(self, src, dst, level)
if dst==self.dist_dir and src.endswith('.rpm'):
getattr(self.distribution,'dist_files',[]).append(
('bdist_rpm',
src.endswith('.src.rpm') and 'any' or sys.version[:3],
os.path.join(dst, os.path.basename(src)))
)
def run(self):
self.run_command('egg_info') # ensure distro name is up-to-date
_bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-','_')
spec = _bdist_rpm._make_spec_file(self)
line23 = '%define version '+version
line24 = '%define version '+rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23,line24)
for line in spec
]
spec.insert(spec.index(line24)+1, "%define unmangled_version "+version)
return spec
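if __name__ == '__main__':
    # Minimal sketch (illustrative) of the version-mangling rule applied in
    # _make_spec_file above: '-' becomes '_' in the RPM version while the
    # original string is preserved in a separate %define.
    version = '1.0-beta'
    rpmversion = version.replace('-', '_')
    print('%define version ' + rpmversion)         # %define version 1.0_beta
    print('%define unmangled_version ' + version)  # %define unmangled_version 1.0-beta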
|
mit
|
code-iai/semrec
|
scripts/bstools/Beliefstate Tools/ExperienceProcessor/expproc/ExperienceProcessor.py
|
1
|
1611
|
#!/usr/bin/python
import sys
from PyQt4 import QtCore, QtGui
from OwlReader import OwlReader
from DesignatorReader import DesignatorReader
from Log import Log
from Visualization_ui import Ui_MainWindow
class ExperienceProcessor(QtGui.QMainWindow):
def __init__(self, parent=None):
self.rdrOwl = OwlReader()
self.rdrDesig = DesignatorReader()
self.arrExperiences = []
self.dicEntities = {}
self.app = QtGui.QApplication(sys.argv)
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.show()
self.loadExperience("/home/winkler/ros/catkin/src/semrec/scripts/bstools/Beliefstate Tools/Datasets/ds4/cram_log.owl", "/home/winkler/ros/catkin/src/semrec/scripts/bstools/Beliefstate Tools/Datasets/ds4/logged_designators.json")
sys.exit(self.app.exec_())
def addExperience(self, expAdd):
self.arrExperiences.append(expAdd)
def loadExperience(self, strOwlFile, strDesignatorFile):
logReturn = Log()
logReturn.setOwlData(self.rdrOwl.loadOwl(strOwlFile))
logReturn.setDesignatorData(self.rdrDesig.loadDesignators(strDesignatorFile))
self.addExperience(logReturn)
def update(self):
self.updateEntities()
self.renderCanvas()
def updateEntities(self):
pass
def renderCanvas(self):
for strName in self.dicEntities:
self.renderEntity(self.dicEntities[strName])
def renderEntity(self, entity):
pass
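# Illustrative entry point (the constructor above already loads a dataset and
# starts the Qt event loop, so instantiating the class is all that is needed).
if __name__ == '__main__':
    ExperienceProcessor()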
|
bsd-2-clause
|
asurve/arvind-sysml2
|
src/main/python/systemml/mlcontext.py
|
1
|
26974
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Methods to create Script object
script_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]
# Utility methods
util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG' ]
__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.conf import SparkConf
import pyspark.mllib.common
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from .converters import *
from .classloader import *
_loadedSystemML = False
def _get_spark_context():
"""
Internal method to get already initialized SparkContext. Developers should always use
_get_spark_context() instead of SparkContext._active_spark_context to ensure SystemML is loaded.
Returns
-------
sc: SparkContext
SparkContext
"""
if SparkContext._active_spark_context is not None:
sc = SparkContext._active_spark_context
global _loadedSystemML
if not _loadedSystemML:
createJavaObject(sc, 'dummy')
_loadedSystemML = True
return sc
else:
raise Exception('Expected spark context to be created.')
# This is a useful utility class for getting the output of the driver JVM from within a Jupyter notebook
# Example usage:
# with jvm_stdout():
# ml.execute(script)
class jvm_stdout(object):
"""
This is a useful utility class for getting the output of the driver JVM from within a Jupyter notebook.
Parameters
----------
parallel_flush: boolean
Whether to flush stdout in parallel.
"""
def __init__(self, parallel_flush=False):
self.util = _get_spark_context()._jvm.org.apache.sysml.api.ml.Utils()
self.parallel_flush = parallel_flush
self.t = threading.Thread(target=self.flush_stdout)
self.stop = False
def flush_stdout(self):
while not self.stop:
time.sleep(1) # flush stdout every 1 second
out = self.util.flushStdOut()
if out != '':
out = out[:-1] if out.endswith('\n') else out
print(out)
def __enter__(self):
self.util.startRedirectStdOut()
if self.parallel_flush:
self.t.start()
def __exit__(self, *args):
if self.parallel_flush:
self.stop = True
self.t.join()
print(self.util.stopRedirectStdOut())
def getHopDAG(ml, script, lines=None, conf=None, apply_rewrites=True, with_subgraph=False):
"""
Compile a DML / PyDML script.
Parameters
----------
ml: MLContext instance
MLContext instance.
script: Script instance
Script instance defined with the appropriate input and output variables.
lines: list of integers
Optional: only display the hops whose begin and end line numbers equal the given integers.
conf: SparkConf instance
Optional spark configuration
apply_rewrites: boolean
If True, perform static rewrites, perform intra-/inter-procedural analysis to propagate size information into functions, and apply dynamic rewrites.
with_subgraph: boolean
If False, the dot graph will be created without subgraphs for statement blocks.
Returns
-------
hopDAG: string
hop DAG in dot format
"""
if not isinstance(script, Script):
raise ValueError("Expected script to be an instance of Script")
scriptString = script.scriptString
script_java = script.script_java
lines = [ int(x) for x in lines ] if lines is not None else [int(-1)]
sc = _get_spark_context()
if conf is not None:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, conf._jconf, apply_rewrites, with_subgraph)
else:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, apply_rewrites, with_subgraph)
return hopDAG
def dml(scriptString):
"""
Create a dml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a dml script or a dml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="dml")
def dmlFromResource(resourcePath):
"""
Create a dml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a dml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="dml", isResource=True)
def pydml(scriptString):
"""
Create a pydml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a pydml script or a pydml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="pydml")
def pydmlFromResource(resourcePath):
"""
Create a pydml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a pydml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="pydml", isResource=True)
def dmlFromFile(filePath):
"""
Create a dml script object based on a file path.
Parameters
----------
filePath: string
Path to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="dml", isResource=False, scriptFormat="file")
def pydmlFromFile(filePath):
"""
Create a pydml script object based on a file path.
Parameters
----------
filePath: string
Path to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="pydml", isResource=False, scriptFormat="file")
def dmlFromUrl(url):
"""
Create a dml script object based on a url.
Parameters
----------
url: string
URL to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="dml", isResource=False, scriptFormat="url")
def pydmlFromUrl(url):
"""
Create a pydml script object based on a url.
Parameters
----------
url: string
URL to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="pydml", isResource=False, scriptFormat="url")
def _java2py(sc, obj):
""" Convert Java object to Python. """
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._java2py(sc, obj)
if isinstance(obj, JavaObject):
class_name = obj.getClass().getSimpleName()
if class_name == 'Matrix':
obj = Matrix(obj, sc)
return obj
def _py2java(sc, obj):
""" Convert Python object to Java. """
if isinstance(obj, SUPPORTED_TYPES):
obj = convertToMatrixBlock(sc, obj)
else:
if isinstance(obj, Matrix):
obj = obj._java_matrix
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._py2java(sc, obj)
return obj
class Matrix(object):
"""
Wrapper around a Java Matrix object.
Parameters
----------
javaMatrix: JavaObject
A Java Matrix object as returned by calling `ml.execute().get()`.
sc: SparkContext
SparkContext
"""
def __init__(self, javaMatrix, sc):
self._java_matrix = javaMatrix
self._sc = sc
def __repr__(self):
return "Matrix"
def toDF(self):
"""
Convert the Matrix to a PySpark SQL DataFrame.
Returns
-------
PySpark SQL DataFrame
A PySpark SQL DataFrame representing the matrix, with
one "__INDEX" column containing the row index (since Spark
DataFrames are unordered), followed by columns of doubles
for each column in the matrix.
"""
jdf = self._java_matrix.toDF()
df = _java2py(self._sc, jdf)
return df
def toNumPy(self):
"""
Convert the Matrix to a NumPy Array.
Returns
-------
NumPy Array
A NumPy Array representing the Matrix object.
"""
np_array = convertToNumPyArr(self._sc, self._java_matrix.toMatrixBlock())
return np_array
class MLResults(object):
"""
Wrapper around a Java ML Results object.
Parameters
----------
results: JavaObject
A Java MLResults object as returned by calling `ml.execute()`.
sc: SparkContext
SparkContext
"""
def __init__(self, results, sc):
self._java_results = results
self._sc = sc
def __repr__(self):
return "MLResults"
def get(self, *outputs):
"""
Parameters
----------
outputs: string, list of strings
Output variables as defined inside the DML script.
"""
outs = [_java2py(self._sc, self._java_results.get(out)) for out in outputs]
if len(outs) == 1:
return outs[0]
return outs
class Script(object):
"""
Instance of a DML/PyDML Script.
Parameters
----------
scriptString: string
Can be either a file path to a DML script or a DML script itself.
scriptType: string
Script language, either "dml" for DML (R-like) or "pydml" for PyDML (Python-like).
isResource: boolean
If true, scriptString is a path to a resource on the classpath
scriptFormat: string
Optional script format, either "auto" or "url" or "file" or "resource" or "string"
"""
def __init__(self, scriptString, scriptType="dml", isResource=False, scriptFormat="auto"):
self.sc = _get_spark_context()
self.scriptString = scriptString
self.scriptType = scriptType
self.isResource = isResource
if scriptFormat != "auto":
if scriptFormat == "url" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromUrl(scriptString)
elif scriptFormat == "url" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromUrl(scriptString)
elif scriptFormat == "file" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile(scriptString)
elif scriptFormat == "file" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile(scriptString)
elif isResource and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromResource(scriptString)
elif isResource and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromResource(scriptString)
elif scriptFormat == "string" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml(scriptString)
elif scriptFormat == "string" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydml(scriptString)
else:
raise ValueError('Unsupported script format: ' + scriptFormat)
elif self.scriptType == "dml":
if scriptString.endswith(".dml"):
if scriptString.startswith("http"):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromUrl(scriptString)
elif os.path.exists(scriptString):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile(scriptString)
elif self.isResource:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromResource(scriptString)
else:
raise ValueError("path: %s does not exist" % scriptString)
else:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml(scriptString)
elif self.scriptType == "pydml":
if scriptString.endswith(".pydml"):
if scriptString.startswith("http"):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromUrl(scriptString)
elif os.path.exists(scriptString):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile(scriptString)
elif self.isResource:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromResource(scriptString)
else:
raise ValueError("path: %s does not exist" % scriptString)
else:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydml(scriptString)
def getScriptString(self):
"""
Obtain the script string (in unicode).
"""
return self.script_java.getScriptString()
def setScriptString(self, scriptString):
"""
Set the script string.
Parameters
----------
scriptString: string
Can be either a file path to a DML script or a DML script itself.
"""
self.scriptString = scriptString
self.script_java.setScriptString(scriptString)
return self
def getInputVariables(self):
"""
Obtain the input variable names.
"""
return self.script_java.getInputVariables()
def getOutputVariables(self):
"""
Obtain the output variable names.
"""
return self.script_java.getOutputVariables()
def clearIOS(self):
"""
Clear the inputs, outputs, and symbol table.
"""
self.script_java.clearIOS()
return self
def clearIO(self):
"""
Clear the inputs and outputs, but not the symbol table.
"""
self.script_java.clearIO()
return self
def clearAll(self):
"""
Clear the script string, inputs, outputs, and symbol table.
"""
self.script_java.clearAll()
return self
def clearInputs(self):
"""
Clear the inputs.
"""
self.script_java.clearInputs()
return self
def clearOutputs(self):
"""
Clear the outputs.
"""
self.script_java.clearOutputs()
return self
def clearSymbolTable(self):
"""
Clear the symbol table.
"""
self.script_java.clearSymbolTable()
return self
def results(self):
"""
Obtain the results of the script execution.
"""
return MLResults(self.script_java.results(), self.sc)
def getResults(self):
"""
Obtain the results of the script execution.
"""
return MLResults(self.script_java.getResults(), self.sc)
def setResults(self, results):
"""
Set the results of the script execution.
"""
self.script_java.setResults(results._java_results)
return self
def isDML(self):
"""
Is the script type DML?
"""
return self.script_java.isDML()
def isPYDML(self):
"""
Is the script type PyDML?
"""
return self.script_java.isPYDML()
def getScriptExecutionString(self):
"""
Generate the script execution string, which adds read/load/write/save
statements to the beginning and end of the script to execute.
"""
return self.script_java.getScriptExecutionString()
def __repr__(self):
return "Script"
def info(self):
"""
Display information about the script as a String. This consists of the
script type, inputs, outputs, input parameters, input variables, output
variables, the symbol table, the script string, and the script execution string.
"""
return self.script_java.info()
def displayInputs(self):
"""
Display the script inputs.
"""
return self.script_java.displayInputs()
def displayOutputs(self):
"""
Display the script outputs.
"""
return self.script_java.displayOutputs()
def displayInputParameters(self):
"""
Display the script input parameters.
"""
return self.script_java.displayInputParameters()
def displayInputVariables(self):
"""
Display the script input variables.
"""
return self.script_java.displayInputVariables()
def displayOutputVariables(self):
"""
Display the script output variables.
"""
return self.script_java.displayOutputVariables()
def displaySymbolTable(self):
"""
Display the script symbol table.
"""
return self.script_java.displaySymbolTable()
def getName(self):
"""
Obtain the script name.
"""
return self.script_java.getName()
def setName(self, name):
"""
Set the script name.
"""
self.script_java.setName(name)
return self
def getScriptType(self):
"""
Obtain the script type.
"""
return self.scriptType
def input(self, *args, **kwargs):
"""
Parameters
----------
args: name, value tuple
where name is a string, and currently supported value formats
are double, string, dataframe, rdd, and lists of such objects.
kwargs: dict of name, value pairs
To know what formats are supported for name and value, look above.
"""
if args and len(args) != 2:
raise ValueError("Expected name, value pair.")
elif args:
self._setInput(args[0], args[1])
for name, value in kwargs.items():
self._setInput(name, value)
return self
def _setInput(self, key, val):
# `in` is a reserved word ("keyword") in Python, so `script_java.in(...)` is not
# allowed. Therefore, we use the following code in which we retrieve a function
# representing `script_java.in`, and then call it with the arguments. This is in
# lieu of adding a new `input` method on the JVM side, as that would complicate use
# from Scala/Java.
if isinstance(val, py4j.java_gateway.JavaObject):
py4j.java_gateway.get_method(self.script_java, "in")(key, val)
else:
py4j.java_gateway.get_method(self.script_java, "in")(key, _py2java(self.sc, val))
def output(self, *names):
"""
Parameters
----------
names: string, list of strings
Output variables as defined inside the DML script.
"""
for val in names:
self.script_java.out(val)
return self
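# Illustrative chaining example (hypothetical names; assumes an MLContext `ml`):
#   script = dml("y = x * 2").input(x=5.0).output("y")
#   y = ml.execute(script).get("y")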
class MLContext(object):
"""
Wrapper around the new SystemML MLContext.
Parameters
----------
sc: SparkContext or SparkSession
An instance of pyspark.SparkContext or pyspark.sql.SparkSession.
"""
def __init__(self, sc):
if isinstance(sc, pyspark.sql.session.SparkSession):
sc = sc._sc
elif not isinstance(sc, SparkContext):
raise ValueError("Expected sc to be a SparkContext or SparkSession, got " % str(type(sc)))
self._sc = sc
self._ml = createJavaObject(sc, 'mlcontext')
def __repr__(self):
return "MLContext"
def execute(self, script):
"""
Execute a DML / PyDML script.
Parameters
----------
script: Script instance
Script instance defined with the appropriate input and output variables.
Returns
-------
ml_results: MLResults
MLResults instance.
"""
if not isinstance(script, Script):
raise ValueError("Expected script to be an instance of Script")
scriptString = script.scriptString
script_java = script.script_java
return MLResults(self._ml.execute(script_java), self._sc)
def setStatistics(self, statistics):
"""
Whether or not to output statistics (such as execution time, elapsed time)
about script executions.
Parameters
----------
statistics: boolean
"""
self._ml.setStatistics(bool(statistics))
return self
def setGPU(self, enable):
"""
Whether or not to enable GPU.
Parameters
----------
enable: boolean
"""
self._ml.setGPU(bool(enable))
return self
def setForceGPU(self, enable):
"""
Whether or not to force the usage of GPU operators.
Parameters
----------
enable: boolean
"""
self._ml.setForceGPU(bool(enable))
return self
def setStatisticsMaxHeavyHitters(self, maxHeavyHitters):
"""
The maximum number of heavy hitters that are printed as part of the statistics.
Parameters
----------
maxHeavyHitters: int
"""
self._ml.setStatisticsMaxHeavyHitters(maxHeavyHitters)
return self
def setExplain(self, explain):
"""
Whether to output an explanation of the program. Mainly intended for developers.
Parameters
----------
explain: boolean
"""
self._ml.setExplain(bool(explain))
return self
def setExplainLevel(self, explainLevel):
"""
Set explain level.
Parameters
----------
explainLevel: string
Can be one of "hops", "runtime", "recompile_hops", "recompile_runtime"
or in the above in upper case.
"""
self._ml.setExplainLevel(explainLevel)
return self
def setConfigProperty(self, propertyName, propertyValue):
"""
Set configuration property, such as setConfigProperty("localtmpdir", "/tmp/systemml").
Parameters
----------
propertyName: String
propertyValue: String
"""
self._ml.setConfigProperty(propertyName, propertyValue)
return self
def setConfig(self, configFilePath):
"""
Set SystemML configuration based on a configuration file.
Parameters
----------
configFilePath: String
"""
self._ml.setConfig(configFilePath)
return self
def resetConfig(self):
"""
Reset configuration settings to default values.
"""
self._ml.resetConfig()
return self
def version(self):
"""Display the project version."""
return self._ml.version()
def buildTime(self):
"""Display the project build time."""
return self._ml.buildTime()
def info(self):
"""Display the project information."""
return self._ml.info().toString()
def isExplain(self):
"""Returns True if program instruction details should be output, False otherwise."""
return self._ml.isExplain()
def isStatistics(self):
"""Returns True if program execution statistics should be output, False otherwise."""
return self._ml.isStatistics()
def isGPU(self):
"""Returns True if GPU mode is enabled, False otherwise."""
return self._ml.isGPU()
def isForceGPU(self):
"""Returns True if "force" GPU mode is enabled, False otherwise."""
return self._ml.isForceGPU()
def close(self):
"""
Closes this MLContext instance to cleanup buffer pool, static/local state and scratch space.
Note the SparkContext is not explicitly closed to allow external reuse.
"""
self._ml.close()
return self
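# --- Usage sketch (illustrative; assumes PySpark with SystemML on the classpath) ---
if __name__ == '__main__':
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    ml = MLContext(spark)
    script = dml("s = 'Hello World'").output("s")
    print(ml.execute(script).get("s"))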
|
apache-2.0
|
rcbops/horizon-buildpackage
|
horizon/dashboards/nova/access_and_security/floating_ips/tests.py
|
2
|
8943
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django import http
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from mox import IsA, IgnoreArg
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import test
from horizon.dashboards.nova.access_and_security.floating_ips.forms import \
FloatingIpAssociate
INDEX_URL = reverse('horizon:nova:access_and_security:index')
class FloatingIpViewTests(test.BaseViewTests):
def setUp(self):
super(FloatingIpViewTests, self).setUp()
keypair = api.KeyPair(None)
keypair.name = 'keyName'
self.keypairs = (keypair,)
server = api.Server(None, self.request)
server.id = 1
server.name = 'serverName'
self.server = server
self.servers = (server, )
floating_ip = api.FloatingIp(None)
floating_ip.id = 1
floating_ip.fixed_ip = '10.0.0.4'
floating_ip.instance_id = 1
floating_ip.ip = '58.58.58.58'
self.floating_ip = floating_ip
self.floating_ips = [floating_ip, ]
security_group = api.SecurityGroup(None)
security_group.id = '1'
security_group.name = 'default'
self.security_groups = (security_group,)
def test_associate(self):
self.mox.StubOutWithMock(api, 'server_list')
api.server_list = self.mox.CreateMockAnything()
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers)
self.mox.StubOutWithMock(api, 'tenant_floating_ip_get')
api.tenant_floating_ip_get = self.mox.CreateMockAnything()
api.tenant_floating_ip_get(IsA(http.HttpRequest), str(1)).\
AndReturn(self.floating_ip)
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:nova:access_and_security:floating_ips:associate',
args=[1]))
self.assertTemplateUsed(res,
'nova/access_and_security/floating_ips/associate.html')
def test_associate_post(self):
server = self.server
self.mox.StubOutWithMock(api, 'server_list')
api.server_list = self.mox.CreateMockAnything()
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers)
self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
api.tenant_floating_ip_list(IsA(http.HttpRequest)).\
AndReturn(self.floating_ips)
self.mox.StubOutWithMock(api, 'server_add_floating_ip')
api.server_add_floating_ip = self.mox.CreateMockAnything()
api.server_add_floating_ip(IsA(http.HttpRequest), IsA(unicode),
IsA(unicode)).\
AndReturn(None)
self.mox.StubOutWithMock(api, 'tenant_floating_ip_get')
api.tenant_floating_ip_get = self.mox.CreateMockAnything()
api.tenant_floating_ip_get(IsA(http.HttpRequest), str(1)).\
AndReturn(self.floating_ip)
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:nova:access_and_security:floating_ips:associate',
args=[1]),
{'instance_id': 1,
'floating_ip_id': self.floating_ip.id,
'floating_ip': self.floating_ip.ip,
'method': 'FloatingIpAssociate'})
self.assertRedirects(res, INDEX_URL)
def test_associate_post_with_exception(self):
server = self.server
self.mox.StubOutWithMock(api, 'server_list')
api.server_list = self.mox.CreateMockAnything()
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers)
self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
api.tenant_floating_ip_list(IsA(http.HttpRequest)).\
AndReturn(self.floating_ips)
self.mox.StubOutWithMock(api, 'security_group_list')
api.security_group_list(IsA(http.HttpRequest)).\
AndReturn(self.security_groups)
self.mox.StubOutWithMock(api.nova, 'keypair_list')
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)
self.mox.StubOutWithMock(api, 'server_add_floating_ip')
api.server_add_floating_ip = self.mox.CreateMockAnything()
exception = novaclient_exceptions.ClientException('ClientException',
message='clientException')
api.server_add_floating_ip(IsA(http.HttpRequest), IsA(unicode),
IsA(unicode)).\
AndRaise(exception)
self.mox.StubOutWithMock(api, 'tenant_floating_ip_get')
api.tenant_floating_ip_get = self.mox.CreateMockAnything()
api.tenant_floating_ip_get(IsA(http.HttpRequest), IsA(unicode)).\
AndReturn(self.floating_ip)
self.mox.ReplayAll()
res = self.client.post(reverse(
'horizon:nova:access_and_security:floating_ips:associate',
args=[1]),
{'instance_id': 1,
'floating_ip_id': self.floating_ip.id,
'floating_ip': self.floating_ip.ip,
'method': 'FloatingIpAssociate'})
        # The view handles the ClientException internally and still redirects.
self.assertRedirects(res, INDEX_URL)
def test_disassociate_post(self):
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api, 'security_group_list')
self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api, 'tenant_floating_ip_get')
self.mox.StubOutWithMock(api, 'server_remove_floating_ip')
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)
api.security_group_list(IsA(http.HttpRequest)).\
AndReturn(self.security_groups)
api.tenant_floating_ip_list(IsA(http.HttpRequest)).\
AndReturn(self.floating_ips)
api.server_remove_floating_ip(IsA(http.HttpRequest), IsA(int),
IsA(int)).\
AndReturn(None)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % self.floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_disassociate_post_with_exception(self):
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api, 'security_group_list')
self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api, 'tenant_floating_ip_get')
self.mox.StubOutWithMock(api, 'server_remove_floating_ip')
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)
api.security_group_list(IsA(http.HttpRequest)).\
AndReturn(self.security_groups)
api.tenant_floating_ip_list(IsA(http.HttpRequest)).\
AndReturn(self.floating_ips)
exception = novaclient_exceptions.ClientException('ClientException',
message='clientException')
api.server_remove_floating_ip(IsA(http.HttpRequest),
IsA(int),
IsA(int)).AndRaise(exception)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % self.floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
        # The view handles the ClientException internally and still redirects.
self.assertRedirectsNoFollow(res, INDEX_URL)
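# Hedged sketch of the mox lifecycle these tests rely on (record, replay,
# verify), assuming the mox 0.5-era API used throughout this suite:
#   self.mox.StubOutWithMock(api, 'server_list')                   # record
#   api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers)
#   self.mox.ReplayAll()                                           # replay
#   ...exercise the view under test...
#   self.mox.VerifyAll()                                           # verify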
|
apache-2.0
|
cujam/SMOS
|
excelmaster/tests/base_operating.py
|
1
|
2563
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import re
import tools
import mysqlDB
import Auxfunction
import sentiment
class Read:
def __init__(self, filename):
self.filename = filename
self.db = mysqlDB.Mysql('root', 'cujam137', 'Data')
self.Rxlsx = tools.ReadXlsx(self.filename)
self.find_classnames = ['楼智161', '楼智162', '电光161', '机器人161', \
'楼智171', '楼智172', '电光171', '机器人171']
def _scan(self):
        '''
        Return the scan results for the current sheet.
        '''
alldatas = [self.Rxlsx.cat_data(i, 'col') for i in range(self.Rxlsx.ncols)]
self.scan = Auxfunction.Scan(alldatas)
result = self.scan.findall()
return result
def _get_all_datas(self):
        '''
        Yield every record parsed from the workbook's sheets.
        '''
sheet_names = self.Rxlsx.sheet_names
for i in sheet_names:
self.Rxlsx.get_sheet_data(i)
result = self._scan()
if len(result) == 2:
                # Non-standard sheet layout
if result[0]:
for i in range(3, self.Rxlsx.nrows):
date, classname, name, value, thing = self.Rxlsx.cat_data(i, 'row')
date = Auxfunction.change(date)
yield date, classname, name, value, thing
else:
date, classname_location, result_location = result
                if result_location is not None:
classname_data = self.Rxlsx.cat_data(classname_location, 'col')
result_data = self.Rxlsx.cat_data(result_location, 'col')
for classname in self.find_classnames:
data = result_data[classname_data.index(classname)]
for search_result in sentiment.RecognitionName(data):
name, result = search_result
things = re.split('[、,]', result)
for thing in things:
yield date, classname, name, 1, thing
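    # Each yielded record is a (date, classname, name, value, thing) 5-tuple.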
def write_to_mysql(self):
        '''
        Write the collected records into the database.
        '''
        for datas in self._get_all_datas():
            date, classname, name, value, thing = datas
            self.db.insert('datas', date, classname, name, value, thing)
if __name__ == "__main__":
r = Read('../../docs/lession/jqr161.xlsx')
r.write_to_mysql()
|
gpl-3.0
|
Weasyl/weasyl
|
weasyl/files.py
|
1
|
3462
|
import os
import glob
import errno
import codecs
import shutil
from libweasyl.constants import Category
from libweasyl.exceptions import InvalidFileFormat, UnknownFileFormat
from libweasyl.files import file_type_for_category, makedirs_exist_ok
from libweasyl import security
from weasyl.error import WeasylError
import weasyl.define as d
import weasyl.macro as m
def read(filename):
with codecs.open(filename, "r", encoding="utf-8", errors="replace") as f:
return f.read()
def ensure_file_directory(filename):
dirname = os.path.dirname(filename)
makedirs_exist_ok(dirname)
def write(filename, content):
"""
Writes bytes to the specified file.
"""
ensure_file_directory(filename)
with open(filename, "wb") as f:
f.write(content)
def append(filename, content):
"""
Appends text to the specified file.
"""
with codecs.open(filename, "a", "utf-8") as f:
f.write(content)
# Copy the specified file.
copy = shutil.copy
def _remove_glob(glob_path):
"""
Removes files matching the specified pattern.
"""
for f in glob.glob(glob_path):
try:
os.remove(f)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def get_temporary(userid, feature):
"""
Return the full pathname to a temporary file.
Temporary files are named so as to be owned by a user.
"""
return "{temp}{userid}.{feature}.{random}".format(temp=m.MACRO_SYS_TEMP_PATH, userid=userid,
feature=feature, random=security.generate_key(20))
def clear_temporary(userid):
"""
Remove temporary files owned by a user.
"""
_remove_glob("{temp}{userid}.*".format(temp=m.MACRO_SYS_TEMP_PATH, userid=userid))
def make_character_directory(target):
path = d.get_character_directory(target)
makedirs_exist_ok(path)
def make_resource(userid, target, feature, extension=None):
"""
Returns the full path to the specified resource.
"""
root = d.get_character_directory(target)
if feature == "char/submit":
filename = "%d.submit.%d%s" % (target, userid, extension)
elif feature == "char/cover":
filename = "%d.cover%s" % (target, extension)
elif feature == "char/thumb":
filename = "%d.thumb%s" % (target, extension)
elif feature == "char/.thumb":
filename = "%d.new.thumb" % (target,)
else:
raise ValueError("Unknown character resource %r" % (feature,))
return os.path.join(root, filename)
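# Example derived from the format strings above: make_resource(10, 1234,
# "char/submit", ".jpg") joins the character directory for 1234 with the
# filename "1234.submit.10.jpg".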
_feature_typeflags = {
"submit": "=",
"cover": "~",
}
_extension_typeflags = {
".jpg": "J",
".png": "P",
".gif": "G",
}
def typeflag(feature, extension):
symbol = _feature_typeflags[feature]
letter = _extension_typeflags[extension]
return symbol + letter
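# e.g. typeflag("submit", ".jpg") == "=J" and typeflag("cover", ".png") == "~P".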
_categories = {
m.ART_SUBMISSION_CATEGORY: Category.visual,
m.TEXT_SUBMISSION_CATEGORY: Category.literary,
m.MULTIMEDIA_SUBMISSION_CATEGORY: Category.multimedia,
}
def get_extension_for_category(filedata, category):
try:
_, fmt = file_type_for_category(filedata, _categories[category])
except UnknownFileFormat as uff:
e = WeasylError('FileType')
e.error_suffix = uff.args[0]
raise e
except InvalidFileFormat as iff:
e = WeasylError('FileType')
e.error_suffix = iff.args[0]
raise e
else:
return '.' + fmt
|
apache-2.0
|
andela-ooladayo/django
|
tests/gis_tests/gis_migrations/migrations/0001_initial.py
|
269
|
2465
|
from django.db import connection, migrations, models
from ...models import models as gis_models
ops = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', gis_models.MultiPolygonField(srid=4326)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Household',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('neighborhood', models.ForeignKey(
'gis_migrations.Neighborhood',
models.SET_NULL,
to_field='id',
null=True,
)),
('address', models.CharField(max_length=100)),
('zip_code', models.IntegerField(null=True, blank=True)),
('geom', gis_models.PointField(srid=4326, geography=True)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='household',
name='family',
field=models.ForeignKey('gis_migrations.Family', models.SET_NULL, blank=True, null=True),
preserve_default=True,
)
]
if connection.features.gis_enabled and connection.features.supports_raster:
ops += [
migrations.CreateModel(
name='Heatmap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('rast', gis_models.fields.RasterField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
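# The operation list is assembled as plain Python above, so feature-gated
# operations (like the raster-backed Heatmap model) can be appended before
# the Migration class consumes it.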
class Migration(migrations.Migration):
"""
Used for gis-specific migration tests.
"""
operations = ops
|
bsd-3-clause
|
rbenjamin/namebench
|
nb_third_party/dns/rdtypes/ANY/NXT.py
|
248
|
3725
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
class NXT(dns.rdata.Rdata):
"""NXT record
@ivar next: the next name
@type next: dns.name.Name object
@ivar bitmap: the type bitmap
@type bitmap: string
@see: RFC 2535"""
__slots__ = ['next', 'bitmap']
def __init__(self, rdclass, rdtype, next, bitmap):
super(NXT, self).__init__(rdclass, rdtype)
self.next = next
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
bits = []
for i in xrange(0, len(self.bitmap)):
byte = ord(self.bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(i * 8 + j))
text = ' '.join(bits)
return '%s %s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
bitmap = ['\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00' ]
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
nrdtype = int(token.value)
else:
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NXT with bit 0")
if nrdtype > 127:
raise dns.exception.SyntaxError("NXT with bit > 127")
i = nrdtype // 8
bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (nrdtype % 8)))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, next, bitmap)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
file.write(self.bitmap)
def to_digestable(self, origin = None):
return self.next.to_digestable(origin) + self.bitmap
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
bitmap = wire[current : current + rdlen]
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, bitmap)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
v = cmp(self.bitmap, other.bitmap)
return v
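# Hedged usage sketch (dnspython 1.x on Python 2, assuming dns.rdataclass
# and dns.rdatatype are imported):
#   rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NXT,
#                               'next.example. A MX SIG')
#   rdata.to_text()   # -> 'next.example. A MX SIG'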
|
apache-2.0
|
herczy/typesafety
|
typesafety/tests/test_plugin.py
|
3
|
2955
|
#
# Copyright (c) 2013-2018 Balabit
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import unittest
import optparse
import shlex
import nose
from ..noseplugin import TypesafetyPlugin
class TestPlugin(unittest.TestCase):
def __activate_call(self, *, filter_func=None):
self.filter = filter_func
def setUp(self):
self.filter = None
self.plugin = TypesafetyPlugin(activate=self.__activate_call)
def test_is_nose_plugin(self):
self.assertIsInstance(self.plugin, nose.plugins.Plugin)
def __get_options(self, enable_for=()):
res = type(
'Options',
(object,),
{
'enable_typesafety': list(enable_for),
'keep_typesafety_trace': False,
}
)
return res()
def test_not_enabled_by_default(self):
self.assertFalse(self.plugin.enabled)
def test_name(self):
self.assertEqual('typesafety', self.plugin.name)
def test_options(self):
parser = optparse.OptionParser()
self.plugin.options(parser, {})
try:
opts, _ = parser.parse_args(
shlex.split(
'nosetests3 --enable-typesafety example ' +
'--enable-typesafety example2'
)
)
except SystemExit as exc:
self.fail('Option parser exited with code {}'.format(exc))
self.assertEqual(
['example', 'example2'],
opts.enable_typesafety
)
def test_not_enabled_without_modules_given(self):
self.plugin.configure(self.__get_options(), None)
self.assertFalse(self.plugin.enabled)
def test_enabled_with_at_least_one_module_given(self):
self.plugin.configure(
self.__get_options(('example',)),
None
)
self.assertTrue(self.plugin.enabled)
def test_activate_called_with_filter_func(self):
self.plugin.configure(
self.__get_options(('example', 'example2')),
None
)
self.assertTrue(self.filter('example'))
self.assertTrue(self.filter('example2'))
self.assertFalse(self.filter('example3'))
self.assertTrue(self.filter('example.submodule'))
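# Hedged CLI sketch mirroring test_options above: the plugin is driven by
#   nosetests3 --enable-typesafety example --enable-typesafety example2
# which enables checking for example, example2 and their submodules.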
|
lgpl-2.1
|
40023154/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/types.py
|
756
|
3167
|
"""
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
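# Example: new_class() reproduces what a class statement does, so
#   Point = new_class('Point', (object,), exec_body=lambda ns: ns.update(x=0))
# is roughly equivalent to:
#   class Point(object):
#       x = 0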
|
gpl-2.0
|
petrutlucian94/nova
|
nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
|
26
|
6784
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.contrib import quota_classes
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import quota_classes \
as quota_classes_v21
from nova.api.openstack import extensions
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def quota_set(class_name):
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20, 'key_pairs': 100,
'injected_file_path_bytes': 255}}
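# quota_set() builds the complete default quota dict that the show() and
# update() assertions below compare controller responses against.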
class QuotaClassSetsTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
super(QuotaClassSetsTestV21, self).setUp()
self.req_admin = fakes.HTTPRequest.blank('', use_admin_context=True)
self.req = fakes.HTTPRequest.blank('')
self._setup()
def _setup(self):
ext_info = plugins.LoadedExtensionInfo()
self.controller = quota_classes_v21.QuotaClassSetsController(
extension_info=ext_info)
def test_format_quota_set(self):
raw_quota_set = {
'instances': 10,
'cores': 20,
'ram': 51200,
'floating_ips': 10,
'fixed_ips': -1,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}
quota_set = self.controller._format_quota_set('test_class',
raw_quota_set)
qs = quota_set['quota_class_set']
self.assertEqual(qs['id'], 'test_class')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['fixed_ips'], -1)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
self.assertEqual(qs['key_pairs'], 100)
def test_quotas_show_as_admin(self):
res_dict = self.controller.show(self.req_admin, 'test_class')
self.assertEqual(res_dict, quota_set('test_class'))
def test_quotas_show_as_unauthorized_user(self):
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
self.req, 'test_class')
def test_quotas_update_as_admin(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
res_dict = self.controller.update(self.req_admin, 'test_class',
body=body)
self.assertEqual(res_dict, body)
def test_quotas_update_as_user(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
self.req, 'test_class', body=body)
def test_quotas_update_with_empty_body(self):
body = {}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
def test_quotas_update_with_non_integer(self):
body = {'quota_class_set': {'instances': "abc"}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
body = {'quota_class_set': {'instances': 50.5}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
body = {'quota_class_set': {
'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
def test_quotas_update_with_unsupported_quota_class(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'unsupported': 12}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
class QuotaClassSetsTestV2(QuotaClassSetsTestV21):
validation_error = webob.exc.HTTPBadRequest
def _setup(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {}
self.controller = quota_classes.QuotaClassSetsController(ext_mgr)
|
apache-2.0
|
sreichholf/python-coherence
|
coherence/backends/yamj_storage.py
|
5
|
13719
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# a backend to expose a YAMJ library via UPnP
# see http://code.google.com/p/moviejukebox/ for more info on YAMJ (Yet Another Movie Jukebox):
# Copyright 2007, Frank Scholz <[email protected]>
# Copyright 2009, Jean-Michel Sizun <jm.sizun AT free.fr>
#
# TODO: add comments
import urllib
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore, BackendItem, Container, \
LazyContainer, AbstractBackendStore
from coherence import log
from coherence.upnp.core.utils import getPage
from coherence.extern.et import parse_xml
import mimetypes
mimetypes.init()
mimetypes.add_type('audio/x-m4a', '.m4a')
mimetypes.add_type('video/mp4', '.mp4')
mimetypes.add_type('video/mpegts', '.ts')
mimetypes.add_type('video/divx', '.divx')
mimetypes.add_type('video/divx', '.avi')
mimetypes.add_type('video/x-matroska', '.mkv')
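# With the registrations above, e.g. mimetypes.guess_type('movie.mkv')
# returns ('video/x-matroska', None).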
class MovieItem(BackendItem):
def __init__(self, movie, store, title = None, url = None):
self.movie_id = 'UNK'
if movie.find('./id') is not None:
self.movie_id = movie.find('./id').text
self.title = movie.find('./title').text
self.baseFilename = movie.find('./baseFilename').text
self.plot = movie.find('./plot').text
self.outline = movie.find('./outline').text
self.posterFilename = movie.find('./posterFile').text
self.thumbnailFilename = movie.find('./thumbnail').text
self.rating = movie.find('./rating').text
self.director = movie.find('./director').text
self.genres = movie.findall('./genres/genre')
self.actors = movie.findall('./cast/actor')
self.year = movie.find('year').text
self.audioChannels = movie.find('audioChannels').text
self.resolution = movie.find('resolution').text
self.language = movie.find('language').text
self.season = movie.find('season').text
if title is not None:
self.upnp_title = title
else:
self.upnp_title = self.title
if url is not None:
self.movie_url = url
else:
self.movie_url = movie.find('./files/file/fileURL').text
self.posterURL = "%s/%s" % (store.jukebox_url, self.posterFilename)
self.thumbnailURL = "%s/%s" % (store.jukebox_url, self.thumbnailFilename)
#print self.movie_id, self.title, self.url, self.posterURL
self.str_genres = []
for genre in self.genres:
self.str_genres.append(genre.text)
self.str_actors = []
for actor in self.actors:
self.str_actors.append(actor.text)
        url_mimetype, _ = mimetypes.guess_type(self.movie_url, strict=False)
        if url_mimetype is None:
            url_mimetype = "video"
self.name = self.title
self.duration = None
self.size = None
self.mimetype = url_mimetype
self.item = None
def get_item(self):
        if self.item is None:
upnp_id = self.get_id()
upnp_parent_id = self.parent.get_id()
self.item = DIDLLite.Movie(upnp_id, upnp_parent_id, self.upnp_title)
self.item.album = None
self.item.albumArtURI = self.posterURL
self.item.artist = None
self.item.creator = self.director
self.item.date = self.year
self.item.description = self.plot
self.item.director = self.director
self.item.longDescription = self.outline
self.item.originalTrackNumber = None
self.item.restricted = None
self.item.title = self.upnp_title
self.item.writeStatus = "PROTECTED"
self.item.icon = self.thumbnailURL
self.item.genre = None
self.item.genres = self.str_genres
self.item.language = self.language
self.item.actors = self.str_actors
res = DIDLLite.Resource(self.movie_url, 'http-get:*:%s:*' % self.mimetype)
res.duration = self.duration
res.size = self.size
res.nrAudioChannels = self.audioChannels
res.resolution = self.resolution
self.item.res.append(res)
return self.item
def get_path(self):
return self.movie_url
def get_id(self):
return self.storage_id
class YamjStore(AbstractBackendStore):
logCategory = 'yamj_store'
implements = ['MediaServer']
description = ('YAMJ', 'exposes the movie/TV series data files and metadata from a given YAMJ (Yet Another Movie Jukebox) library.', None)
options = [{'option':'name', 'text':'Server Name:', 'type':'string','default':'my media','help': 'the name under this MediaServer shall show up with on other UPnP clients'},
{'option':'version','text':'UPnP Version:','type':'int','default':2,'enum': (2,1),'help': 'the highest UPnP version this MediaServer shall support','level':'advance'},
{'option':'uuid','text':'UUID Identifier:','type':'string','help':'the unique (UPnP) identifier for this MediaServer, usually automatically set','level':'advance'},
{'option':'refresh','text':'Refresh period','type':'string'},
{'option':'yamj_url','text':'Library URL:','type':'string', 'help':'URL to the library root directory.'}
]
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name','YAMJ')
        self.yamj_url = kwargs.get('yamj_url', "http://localhost/yamj")
self.jukebox_url = self.yamj_url + "/Jukebox/"
self.refresh = int(kwargs.get('refresh',60))*60
self.nbMoviesPerFile = None
rootItem = Container(None, self.name)
self.set_root_item(rootItem)
        self.retrieveCategories(rootItem)
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.presentationURL = self.yamj_url
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['internal:%s:video/mp4:*' % self.server.coherence.hostname,
'http-get:*:video/mp4:*',
'internal:%s:video/x-msvideo:*' % self.server.coherence.hostname,
'http-get:*:video/x-msvideo:*',
'internal:%s:video/mpeg:*' % self.server.coherence.hostname,
'http-get:*:video/mpeg:*',
'internal:%s:video/avi:*' % self.server.coherence.hostname,
'http-get:*:video/avi:*',
'internal:%s:video/divx:*' % self.server.coherence.hostname,
'http-get:*:video/divx:*',
'internal:%s:video/quicktime:*' % self.server.coherence.hostname,
'http-get:*:video/quicktime:*'],
default=True)
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
#self.server.content_directory_server.set_variable(0, 'SortCapabilities', '*')
    def retrieveCategories(self, parent):
filepath = self.jukebox_url + "Categories.xml"
dfr = getPage(filepath)
def read_categories(data, parent_item, jukebox_url):
#nbMoviesPerFile = 1 #int(data.find('preferences/mjb.nbThumbnailsPerPage').text)
#self.debug("YMAJ: Nb Movies per file = %s" % nbMoviesPerFile)
for category in data.findall('category'):
type = category.get('name')
category_title = type
if (type != 'Other'):
category_title = "By %s" % category_title
categoryItem = Container(parent_item, category_title)
parent_item.add_child(categoryItem)
for index in category.findall('./index'):
name = index.get('name')
first_filename = index.text
root_name = first_filename[:-2]
self.debug("adding index %s:%s" % (type,name))
parent = categoryItem
if (type == 'Other'):
parent = parent_item
indexItem = LazyContainer(parent, name, None, self.refresh, self.retrieveIndexMovies, per_page=1, name=name, root_name=root_name)
parent.add_child(indexItem)
self.init_completed()
def fail_categories_read(f):
self.warning("failure reading yamj categories (%s): %r" % (filepath,f.getErrorMessage()))
return f
dfr.addCallback(parse_xml)
dfr.addErrback(fail_categories_read)
dfr.addCallback(read_categories, parent_item=parent, jukebox_url=self.jukebox_url)
dfr.addErrback(fail_categories_read)
return dfr
    def retrieveIndexMovies(self, parent, name, root_name, per_page=10, page=0, offset=0):
#print offset, per_page
if self.nbMoviesPerFile is None:
counter = 1
else:
counter = abs(offset / self.nbMoviesPerFile) + 1
fileUrl = "%s/%s_%d.xml" % (self.jukebox_url, urllib.quote(root_name), counter)
def fail_readPage(f):
self.warning("failure reading yamj index (%s): %r" % (fileUrl,f.getErrorMessage()))
return f
def fail_parseIndex(f):
self.warning("failure parsing yamj index (%s): %r" % (fileUrl,f.getErrorMessage()))
return f
def readIndex(data):
for index in data.findall('category/index'):
current = index.get('current')
if (current == "true"):
currentIndex = index.get('currentIndex')
lastIndex = index.get('lastIndex')
if (currentIndex != lastIndex):
parent.childrenRetrievingNeeded = True
self.debug("%s: %s/%s" % (root_name, currentIndex, lastIndex))
break
movies = data.findall('movies/movie')
if self.nbMoviesPerFile is None:
self.nbMoviesPerFile = len(movies)
for movie in movies:
isSet = (movie.attrib['isSet'] == 'true')
if isSet is True:
# the movie corresponds to a set
name = movie.find('./title').text
index_name = movie.find('./baseFilename').text
set_root_name = index_name[:-2]
self.debug("adding set %s" % name)
indexItem = LazyContainer(parent, name, None, self.refresh, self.retrieveIndexMovies, per_page=1, name=name, root_name=set_root_name)
parent.add_child(indexItem, set_root_name)
else:
# this is a real movie
movie_id = "UNK"
movie_id_xml = movie.find('./id')
if movie_id_xml is not None:
movie_id = movie_id_xml.text
files = movie.findall('./files/file')
if (len(files) == 1):
url = files[0].find('./fileURL').text
external_id = "%s/%s" % (movie_id,url)
movieItem = MovieItem(movie, self)
parent.add_child(movieItem, external_id)
else:
name = movie.find('./title').text
if name is None or name == '':
name = movie.find('./baseFilename').text
season = movie.find('season').text
if season is not None and season != '-1':
name = "%s - season %s" % (name, season)
container_item = Container(parent, name)
parent.add_child(container_item, name)
container_item.store = self
for file in files:
episodeIndex = file.attrib['firstPart']
episodeTitle = file.attrib['title']
if (episodeTitle == 'UNKNOWN'):
title = "%s - %s" %(name, episodeIndex)
else:
title = "%s - %s " % (episodeIndex, episodeTitle)
episodeUrl = file.find('./fileURL').text
fileItem = MovieItem(movie, self, title=title, url=episodeUrl)
file_external_id = "%s/%s" % (movie_id,episodeUrl)
container_item.add_child(fileItem, file_external_id)
self.debug("Reading index file %s" % fileUrl)
d = getPage(fileUrl)
d.addCallback(parse_xml)
d.addErrback(fail_readPage)
d.addCallback(readIndex)
d.addErrback(fail_parseIndex)
return d
def __repr__(self):
return self.__class__.__name__
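# Hedged configuration sketch (the exact plugin wiring may differ between
# Coherence releases):
#   from coherence.base import Coherence
#   Coherence({'logmode': 'warning',
#              'plugin': [{'backend': 'YamjStore',
#                          'name': 'my movies',
#                          'yamj_url': 'http://localhost/yamj'}]})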
|
mit
|
aldebaran/qibuild
|
python/qisrc/test/test_qisrc_maintainers.py
|
1
|
2468
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiSrc Maintainers """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qisrc.maintainers
def test_no_project(qisrc_action):
""" Test No Project """
error = qisrc_action("maintainers", "--list", raises=True)
assert "at least one project" in error
def test_no_maintainers_yet(qisrc_action, record_messages):
""" Test No Maintainers Yet """
_foo1 = qisrc_action.worktree.create_project("foo")
qisrc_action("maintainers", "--list", "--project", "foo")
assert record_messages.find("No maintainer")
def test_add_cmdline(qisrc_action, record_messages):
""" Test Add Cmd Line """
_foo1 = qisrc_action.worktree.create_project("foo")
qisrc_action("maintainers", "--add", "--project", "foo",
"--name", "John Smith", "--email", "[email protected]")
qisrc_action("maintainers", "--project", "foo")
assert record_messages.find("John Smith")
assert record_messages.find("<[email protected]>")
def test_add_interact(qisrc_action, interact):
""" Test Add Interact """
foo1 = qisrc_action.worktree.create_project("foo")
qisrc_action.chdir(foo1.path)
interact.answers = ["John Doe", "[email protected]"]
qisrc_action("maintainers", "--add")
maintainers = qisrc.maintainers.get(foo1)
assert maintainers == [{"name": "John Doe",
"email": "[email protected]"}]
def test_remove_maintainer(qisrc_action, interact):
""" Test Remove Maintainer """
foo1 = qisrc_action.worktree.create_project("foo")
qisrc.maintainers.add(foo1, name="John Smith",
email="[email protected]")
interact.answers = [1]
qisrc_action.chdir("foo")
qisrc_action("maintainers", "--remove")
assert not qisrc.maintainers.get(foo1)
def test_add_utf8(qisrc_action):
""" Test Add UTF-8 """
foo1 = qisrc_action.worktree.create_project("foo")
qisrc.maintainers.add(foo1, name="Noé", email="[email protected]")
def test_list_utf8(qisrc_action):
""" Test List UTF-8 """
foo1 = qisrc_action.worktree.create_project("foo")
qisrc.maintainers.add(foo1, name="Noé", email="[email protected]")
qisrc_action("maintainers", "--list", "--project", "foo")
|
bsd-3-clause
|
biddisco/VTK
|
ThirdParty/Twisted/twisted/mail/test/test_mailmail.py
|
77
|
2683
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.mail.scripts.mailmail}, the implementation of the
command line program I{mailmail}.
"""
import sys
from StringIO import StringIO
from twisted.trial.unittest import TestCase
from twisted.mail.scripts.mailmail import parseOptions
class OptionsTests(TestCase):
"""
Tests for L{parseOptions} which parses command line arguments and reads
message text from stdin to produce an L{Options} instance which can be
used to send a message.
"""
def test_unspecifiedRecipients(self):
"""
If no recipients are given in the argument list and there is no
recipient header in the message text, L{parseOptions} raises
L{SystemExit} with a string describing the problem.
"""
self.addCleanup(setattr, sys, 'stdin', sys.stdin)
sys.stdin = StringIO(
'Subject: foo\n'
'\n'
'Hello, goodbye.\n')
exc = self.assertRaises(SystemExit, parseOptions, [])
self.assertEqual(exc.args, ('No recipients specified.',))
def test_listQueueInformation(self):
"""
The I{-bp} option for listing queue information is unsupported and
if it is passed to L{parseOptions}, L{SystemExit} is raised.
"""
exc = self.assertRaises(SystemExit, parseOptions, ['-bp'])
self.assertEqual(exc.args, ("Unsupported option.",))
def test_stdioTransport(self):
"""
The I{-bs} option for using stdin and stdout as the SMTP transport
is unsupported and if it is passed to L{parseOptions}, L{SystemExit}
is raised.
"""
exc = self.assertRaises(SystemExit, parseOptions, ['-bs'])
self.assertEqual(exc.args, ("Unsupported option.",))
def test_ignoreFullStop(self):
"""
The I{-i} and I{-oi} options for ignoring C{"."} by itself on a line
are unsupported and if either is passed to L{parseOptions},
L{SystemExit} is raised.
"""
exc = self.assertRaises(SystemExit, parseOptions, ['-i'])
self.assertEqual(exc.args, ("Unsupported option.",))
exc = self.assertRaises(SystemExit, parseOptions, ['-oi'])
self.assertEqual(exc.args, ("Unsupported option.",))
def test_copyAliasedSender(self):
"""
The I{-om} option for copying the sender if they appear in an alias
expansion is unsupported and if it is passed to L{parseOptions},
L{SystemExit} is raised.
"""
exc = self.assertRaises(SystemExit, parseOptions, ['-om'])
self.assertEqual(exc.args, ("Unsupported option.",))
|
bsd-3-clause
|
nashuiliang/zookeeper
|
src/contrib/zkpython/src/test/zktestbase.py
|
98
|
3572
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest, threading, zookeeper
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class TestBase(unittest.TestCase):
SERVER_PORT = 22182
def __init__(self,methodName='runTest'):
unittest.TestCase.__init__(self,methodName)
self.host = "localhost:%d" % self.SERVER_PORT
self.connected = False
self.handle = -1
logdir = os.environ.get("ZKPY_LOG_DIR")
logfile = os.path.join(logdir, self.__class__.__name__ + ".log")
try:
f = open(logfile,"w")
zookeeper.set_log_stream(f)
except IOError:
print("Couldn't open " + logfile + " for writing")
def setUp(self):
self.callback_flag = False
self.cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
self.cv.acquire()
self.connected = True
self.cv.notify()
self.cv.release()
self.cv.acquire()
self.handle = zookeeper.init(self.host, connection_watcher)
self.cv.wait(15.0)
self.cv.release()
if not self.connected:
raise Exception("Couldn't connect to host -", self.host)
def newConnection(self):
cv = threading.Condition()
self.pending_connection = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.pending_connection = True
cv.notify()
cv.release()
cv.acquire()
handle = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
if not self.pending_connection:
raise Exception("Couldn't connect to host -", self.host)
return handle
def ensureDeleted(self,path):
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle), "Not connected!")
try:
self.assertEqual(zookeeper.OK, zookeeper.delete(self.handle, path))
except zookeeper.NoNodeException:
pass
def ensureCreated(self,path,data="",flags=zookeeper.EPHEMERAL):
"""
It's possible not to get the flags you want here if the node already exists
"""
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle), "Not connected!")
try:
self.assertEqual(path, zookeeper.create(self.handle, path, data, [ZOO_OPEN_ACL_UNSAFE], flags))
except zookeeper.NodeExistsException:
pass
def tearDown(self):
if self.connected:
zookeeper.close(self.handle)
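    # Local stand-in for the all() builtin (added in Python 2.5), kept for
    # older interpreters.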
def all(self, iterable):
for element in iterable:
if not element:
return False
return True
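# Hedged usage sketch: concrete suites subclass TestBase, e.g.
#   class CreateTest(TestBase):
#       def test_create(self):
#           self.ensureDeleted("/zkpython-test")
#           self.ensureCreated("/zkpython-test", data="payload")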
|
apache-2.0
|
sdeepanshu02/microblog
|
flask/Lib/site-packages/sqlalchemy/sql/type_api.py
|
12
|
46121
|
# sql/type_api.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see
:ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
__slots__ = 'expr', 'type'
default_comparator = None
def __init__(self, expr):
self.expr = expr
self.type = expr.type
@util.dependencies('sqlalchemy.sql.default_comparator')
def operate(self, default_comparator, op, *other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, *(other + o[1:]), **kwargs)
@util.dependencies('sqlalchemy.sql.default_comparator')
def reverse_operate(self, default_comparator, op, other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, other,
reverse=True, *o[1:], **kwargs)
def _adapt_expression(self, op, other_comparator):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
This method determines the type of a resulting binary expression
given two source types and an operator. For example, two
:class:`.Column` objects, both of the type :class:`.Integer`, will
produce a :class:`.BinaryExpression` that also has the type
:class:`.Integer` when compared via the addition (``+``) operator.
However, using the addition operator with an :class:`.Integer`
and a :class:`.Date` object will produce a :class:`.Date`, assuming
"days delta" behavior by the database (in reality, most databases
other than PostgreSQL don't accept this particular operation).
The method returns a tuple of the form <operator>, <type>.
The resulting operator and type will be those applied to the
resulting :class:`.BinaryExpression` as the final operator and the
right-hand side of the expression.
Note that only a subset of operators make usage of
:meth:`._adapt_expression`,
including math operators and user-defined operators, but not
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
.. versionadded:: 0.8 The expression system was enhanced to support
customization of operators on a per-type level.
"""
should_evaluate_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows types which have special
behavior for Python None, such as a JSON type, to indicate that
they'd like to handle the None value explicitly.
To set this flag on an existing type, use the
:meth:`.TypeEngine.evaluates_none` method.
.. seealso::
:meth:`.TypeEngine.evaluates_none`
.. versionadded:: 1.1
"""
def evaluates_none(self):
"""Return a copy of this type which has the :attr:`.should_evaluate_none`
flag set to True.
E.g.::
Table(
'some_table', metadata,
Column(
String(50).evaluates_none(),
nullable=True,
server_default='no value')
)
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows for types which have
special behavior associated with the Python None value to indicate
that the value doesn't necessarily translate into SQL NULL; a
prime example of this is a JSON type which may wish to persist the
JSON value ``'null'``.
        In all cases, the actual NULL SQL value can always be
persisted in any column by using
the :obj:`~.expression.null` SQL construct in an INSERT statement
or associated with an ORM-mapped attribute.
.. note::
The "evaulates none" flag does **not** apply to a value
of ``None`` passed to :paramref:`.Column.default` or
:paramref:`.Column.server_default`; in these cases, ``None``
still means "no default".
.. versionadded:: 1.1
.. seealso::
:ref:`session_forcing_null` - in the ORM documentation
:paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON
interaction with this flag.
:attr:`.TypeEngine.should_evaluate_none` - class-level flag
"""
typ = self.copy()
typ.should_evaluate_none = True
return typ
def copy(self, **kw):
return self.adapt(self.__class__)
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
This function is currently not implemented for SQLAlchemy
types, and for all built in types will return ``None``. However,
it can be implemented by a user-defined type
where it can be consumed by schema comparison tools such as
Alembic autogenerate.
        A future release of SQLAlchemy will potentially implement this method
for builtin types as well.
The function should return True if this type is equivalent to the
given type; the type is typically reflected from the database
so should be database specific. The dialect in use is also
passed. It can also return False to assert that the type is
not equivalent.
:param dialect: a :class:`.Dialect` that is involved in the comparison.
:param conn_type: the type object reflected from the backend.
.. versionadded:: 1.0.3
"""
return None
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
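    # Hedged sketch of a typical override (illustrative only, not part of
    # this base class):
    #   def bind_processor(self, dialect):
    #       def process(value):
    #           return None if value is None else str(value)
    #       return process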
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.column_expression.__code__ \
is not TypeEngine.column_expression.__code__
def bind_expression(self, bindvalue):
""""Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.bind_expression.__code__ \
is not TypeEngine.bind_expression.__code__
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, for those types which enforce a return type,
or are known across the board to do such for all common
DBAPIs (like ``int`` for example), will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`.Variant.with_variant`
that can be called repeatedly.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
.. versionadded:: 0.7.2
"""
return Variant(self, {dialect_name: to_instance(type_)})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]['impl']
except KeyError:
return self._dialect_info(dialect)['impl']
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]['literal']
except KeyError:
d = self._dialect_info(dialect)
d['literal'] = lp = d['impl'].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]['bind']
except KeyError:
d = self._dialect_info(dialect)
d['bind'] = bp = d['impl'].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
return rp
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {'impl': impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
        needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _resolve_value_to_type(value)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).\
encode('ascii', 'backslashreplace')
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self, **kw):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
of the type as being compiled, such as a :class:`.Column` or
:func:`.cast` construct. This keyword is only sent if the method
accepts keyword arguments (e.g. ``**kw``) in its argument signature;
introspection is used to check for this in order to support legacy
forms of this function.
.. versionadded:: 1.0.0 the owning expression is passed to
the ``get_col_spec()`` method via the keyword argument
``type_expression``, if it receives ``**kw`` in its signature.
"""
__visit_name__ = "user_defined"
ensure_kwarg = 'get_col_spec'
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, 'adapt_operator'):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return op, self.type
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
.. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value`
now returns ``self`` by default, rather than falling onto the
more fundamental behavior of
:meth:`.TypeEngine.coerce_compared_value`.
"""
return self
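# A hedged sketch of the ``type_expression`` keyword documented above (the
# function, class and DDL string are illustrative, not part of this module);
# because get_col_spec() accepts **kw, the compiler may pass the owning
# expression along.
def _type_expression_example():
    class ExampleSpecType(UserDefinedType):
        def get_col_spec(self, **kw):
            # kw.get('type_expression') is the owning Column or cast()
            # construct, when the dialect's type compiler provides one.
            return "EXAMPLESPEC"
    return ExampleSpecType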
class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
.. warning::
Note that the **behavior of coerce_compared_value is not inherited
by default from that of the base type**.
If the :class:`.TypeDecorator` is augmenting a
type that requires special logic for certain types of operators,
this method **must** be overridden. A key example is when decorating
the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
the default rules of :meth:`.TypeEngine.coerce_compared_value` should
be used in order to deal with operators like index operations::
class MyJsonType(TypeDecorator):
impl = postgresql.JSON
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
Without the above step, index operations such as ``mycol['foo']``
will cause the index value ``'foo'`` to be JSON encoded.
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, 'impl'):
raise AssertionError("TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated")
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
coerce_to_is_types = (util.NoneType, )
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
``IS NOT`` in conjunction with ``!=``.
For most SQLAlchemy types, this includes ``NoneType``, as well as
``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as typedecorator implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
.. versionadded:: 0.8.2
Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier
control of ``__eq__()`` ``__ne__()`` operations.
"""
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def operate(self, op, *other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs)
@property
def comparator_factory(self):
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
return self.impl.comparator_factory
else:
return type("TDComparator",
(TypeDecorator.Comparator,
self.impl.comparator_factory),
{})
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError('Type object %s does not properly '
'implement the copy() method, it must '
'return an object of type %s' %
(self, self.__class__))
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def _set_parent(self, column):
"""Support SchemaEentTarget"""
super(TypeDecorator, self)._set_parent(column)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEentTarget"""
super(TypeDecorator, self)._set_parent_with_dispatch(parent)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes usage of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if not isinstance(adapted, type(self)):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return self.__class__.process_literal_param.__code__ \
is not TypeDecorator.process_literal_param.__code__
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override
:meth:`.TypeDecorator.process_literal_param` instead of this method
directly.
By default, this method makes use of
:meth:`.TypeDecorator.process_bind_param` if that method is
implemented, where :meth:`.TypeDecorator.process_literal_param` is
not. The rationale here is that :class:`.TypeDecorator` typically
deals with Python conversions of data that are above the layer of
database presentation. With the value converted by
:meth:`.TypeDecorator.process_bind_param`, the underlying type will
then handle whether it needs to be presented to the DBAPI as a bound
parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator since it isn't doing DB-level
# handling, the handling here won't be different for bound vs.
# literals.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
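    # A hedged illustration of the fallback described above (the class is
    # invented for this example): with only process_bind_param() implemented,
    # inline literals are still converted through it, so rendering
    # literal('abc', Upper()) with literal_binds would emit 'ABC':
    #
    #     class Upper(TypeDecorator):
    #         impl = String
    #
    #         def process_bind_param(self, value, dialect):
    #             return value.upper()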
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_result_value.__code__ \
is not TypeDecorator.process_result_value.__code__
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
        :param coltype: A DBAPI column type code, as received from the
         cursor description
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect,
coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
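# A hedged, self-contained version of the "epoch" example from the
# TypeDecorator docstring above; the function and class names are
# illustrative, and the imports are deferred so the sketch has no
# import-time side effects.
def _epoch_type_example():
    import datetime
    from sqlalchemy import Integer

    class MyEpochType(TypeDecorator):
        """Illustrative only: store dates as integer days since 1970-01-01."""
        impl = Integer
        epoch = datetime.date(1970, 1, 1)

        def process_bind_param(self, value, dialect):
            return (value - self.epoch).days

        def process_result_value(self, value, dialect):
            return self.epoch + datetime.timedelta(days=value)

    return MyEpochType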
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. versionadded:: 0.7.2
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def with_variant(self, type_, dialect_name):
"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
@property
def comparator_factory(self):
"""express comparison behavior in terms of the base type"""
return self.impl.comparator_factory
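# A hedged usage sketch for Variant (the table and column are illustrative;
# assumes the public sqlalchemy package): use BigInteger in general, but a
# plain Integer when the dialect in use is SQLite.
def _variant_example():
    from sqlalchemy import BigInteger, Column, Integer, MetaData, Table
    id_type = BigInteger().with_variant(Integer(), 'sqlite')
    return Table('items', MetaData(), Column('id', id_type, primary_key=True))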
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if (issubclass(typeobj.__class__, impltype)):
return typeobj
return typeobj.adapt(impltype)
|
bsd-3-clause
|
mcking49/apache-flask
|
Python/Lib/test/test_code.py
|
29
|
3240
|
"""This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.func_code)
name: f
argcount: 1
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>')
>>> dump(f(4).func_code)
name: g
argcount: 1
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.func_code)
name: h
argcount: 2
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print obj.attr1
... print obj.attr2
... print obj.attr3
>>> dump(attrs.func_code)
name: attrs
argcount: 1
names: ('attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 53L
>>> dump(optimize_away.func_code)
name: optimize_away
argcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 67
consts: ("'doc string'", 'None')
"""
import unittest
import weakref
from test.test_support import run_doctest, run_unittest, cpython_only
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "names", "varnames", "cellvars",
"freevars", "nlocals", "flags"]:
print "%s: %s" % (attr, getattr(co, "co_" + attr))
print "consts:", tuple(consts(co.co_consts))
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec "def f(): pass" in globals(), namespace
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeWeakRefTest)
if __name__ == "__main__":
test_main()
|
mit
|
GaloisInc/hacrypto
|
src/Tools/FigureOfMerit/FigureOfMerit/BlockCiphers/Scenario2/CipherImplementation.py
|
1
|
13359
|
#
# University of Luxembourg
# Laboratory of Algorithmics, Cryptology and Security (LACS)
#
# FigureOfMerit (FOM)
#
# Copyright (C) 2015 University of Luxembourg
#
# Written in 2015 by Daniel Dinu <[email protected]>
#
# This file is part of FigureOfMerit.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from Scenario2.CipherImplementationMetrics import CipherImplementationMetrics
from Scenario2 import Constants
__author__ = 'daniel.dinu'
class CipherImplementation:
def __init__(self, name, block_size, key_size, version, compiler_options):
"""
Initialize cipher implementation
:param name: Cipher name
:param block_size: Cipher block size
:param key_size: Cipher key size
:param version: Cipher implementation version
:param compiler_options: Cipher implementation compiler options
"""
self.name = name
self.block_size = block_size
self.key_size = key_size
self.version = version
self.compiler_options = compiler_options
self.avr_metrics = CipherImplementationMetrics()
self.msp_metrics = CipherImplementationMetrics()
self.arm_metrics = CipherImplementationMetrics()
self.fom1_avr = 0
self.fom1_msp = 0
self.fom1_arm = 0
self.fom2_avr = 0
self.fom2_msp = 0
self.fom2_arm = 0
self.fom3_avr = 0
self.fom3_msp = 0
self.fom3_arm = 0
def add_metrics(self, architecture, metrics):
"""
Add cipher implementation metrics
:param architecture: Cipher implementation architecture
:param metrics: Cipher implementation metrics
"""
if Constants.ARCHITECTURE_AVR == architecture:
self.avr_metrics = metrics
if Constants.ARCHITECTURE_MSP == architecture:
self.msp_metrics = metrics
if Constants.ARCHITECTURE_ARM == architecture:
self.arm_metrics = metrics
def compute_fom(self,
avr_min_code_size,
msp_min_code_size,
arm_min_code_size,
avr_min_ram,
msp_min_ram,
arm_min_ram,
avr_min_execution_time,
msp_min_execution_time,
arm_min_execution_time,
avr_max_code_size,
msp_max_code_size,
arm_max_code_size,
avr_max_ram,
msp_max_ram,
arm_max_ram,
avr_max_execution_time,
msp_max_execution_time,
arm_max_execution_time):
"""
Compute cipher implementation FOM 1, FOM 2 and FOM 3
:param avr_min_code_size: AVR min code size
:param msp_min_code_size: MSP min code size
:param arm_min_code_size: ARM min code size
:param avr_min_ram: AVR min RAM
:param msp_min_ram: MSP min RAM
:param arm_min_ram: ARM min RAM
:param avr_min_execution_time: AVR min execution time
:param msp_min_execution_time: MSP min execution time
:param arm_min_execution_time: ARM min execution time
:param avr_max_code_size: AVR max code size
:param msp_max_code_size: MSP max code size
:param arm_max_code_size: ARM max code size
:param avr_max_ram: AVR max RAM
:param msp_max_ram: MSP max RAM
:param arm_max_ram: ARM max RAM
:param avr_max_execution_time: AVR max execution time
:param msp_max_execution_time: MSP max execution time
:param arm_max_execution_time: ARM max execution time
"""
# AVR
avr_code_size = self.avr_metrics.code_size_e
avr_ram = self.avr_metrics.ram_data + self.avr_metrics.ram_stack_e
avr_execution_time = self.avr_metrics.execution_time_e
# MSP
msp_code_size = self.msp_metrics.code_size_e
msp_ram = self.msp_metrics.ram_data + self.msp_metrics.ram_stack_e
msp_execution_time = self.msp_metrics.execution_time_e
# ARM
arm_code_size = self.arm_metrics.code_size_e
arm_ram = self.arm_metrics.ram_data + self.arm_metrics.ram_stack_e
arm_execution_time = self.arm_metrics.execution_time_e
# AVR weights
avr_code_size_weight = Constants.FOM1_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM1_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM1_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM1_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM1_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM1_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM1_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM1_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM1_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight * (avr_code_size / avr_min_code_size)
avr_fom_ram = avr_ram_weight * (avr_ram / avr_min_ram)
avr_fom_execution_time = avr_execution_time_weight * (avr_execution_time / avr_min_execution_time)
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight * (msp_code_size / msp_min_code_size)
msp_fom_ram = msp_ram_weight * (msp_ram / msp_min_ram)
msp_fom_execution_time = msp_execution_time_weight * (msp_execution_time / msp_min_execution_time)
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight * (arm_code_size / arm_min_code_size)
arm_fom_ram = arm_ram_weight * (arm_ram / arm_min_ram)
arm_fom_execution_time = arm_execution_time_weight * (arm_execution_time / arm_min_execution_time)
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM1_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 1
self.fom1_avr = avr_fom
self.fom1_msp = msp_fom
self.fom1_arm = arm_fom
# AVR weights
avr_code_size_weight = Constants.FOM2_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM2_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM2_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM2_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM2_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM2_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM2_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM2_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM2_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight * (avr_code_size / Constants.AVR_MAX_ROM)
avr_fom_ram = avr_ram_weight * (avr_ram / Constants.AVR_MAX_RAM)
avr_fom_execution_time = avr_execution_time_weight
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight * (msp_code_size / Constants.MSP_MAX_ROM)
msp_fom_ram = msp_ram_weight * (msp_ram / Constants.MSP_MAX_RAM)
msp_fom_execution_time = msp_execution_time_weight
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight * (arm_code_size / Constants.ARM_MAX_ROM)
arm_fom_ram = arm_ram_weight * (arm_ram / Constants.ARM_MAX_RAM)
arm_fom_execution_time = arm_execution_time_weight
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM2_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 2
self.fom2_avr = avr_fom
self.fom2_msp = msp_fom
self.fom2_arm = arm_fom
# AVR weights
avr_code_size_weight = Constants.FOM3_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM3_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM3_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM3_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM3_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM3_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM3_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM3_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM3_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight
avr_fom_ram = avr_ram_weight
avr_fom_execution_time = avr_execution_time_weight * (avr_execution_time / avr_min_execution_time)
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight
msp_fom_ram = msp_ram_weight
msp_fom_execution_time = msp_execution_time_weight * (msp_execution_time / msp_min_execution_time)
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight
arm_fom_ram = arm_ram_weight
arm_fom_execution_time = arm_execution_time_weight * (arm_execution_time / arm_min_execution_time)
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM3_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 3
self.fom3_avr = avr_fom
self.fom3_msp = msp_fom
self.fom3_arm = arm_fom
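    # A hedged restatement of the pattern used throughout compute_fom (this
    # helper is illustrative, not part of the original module): each
    # per-architecture FOM term is a weighted metric normalized by a
    # reference value.
    @staticmethod
    def _weighted_fom_example(values, references, weights):
        """Illustrative only: sum of weight * (value / reference)."""
        return sum(w * (v / r) for v, r, w in zip(values, references, weights))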
|
bsd-3-clause
|
stewnorriss/LibCloud
|
libcloud/test/compute/test_ibm_sce.py
|
29
|
14632
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from libcloud.utils.py3 import httplib
import sys
from libcloud.compute.types import InvalidCredsError
from libcloud.compute.drivers.ibm_sce import IBMNodeDriver as IBM
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import IBM_PARAMS
class IBMTests(unittest.TestCase, TestCaseMixin):
"""
Tests the IBM SmartCloud Enterprise driver.
"""
def setUp(self):
IBM.connectionCls.conn_classes = (None, IBMMockHttp)
IBMMockHttp.type = None
self.driver = IBM(*IBM_PARAMS)
def test_auth(self):
IBMMockHttp.type = 'UNAUTHORIZED'
try:
self.driver.list_nodes()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertTrue(isinstance(e, InvalidCredsError))
self.assertEqual(e.value, '401: Unauthorized')
else:
self.fail('test should have thrown')
def test_list_nodes(self):
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 3)
self.assertEqual(ret[0].id, '26557')
self.assertEqual(ret[0].name, 'Insight Instance')
self.assertEqual(ret[0].public_ips, ['129.33.196.128'])
self.assertEqual(ret[0].private_ips, []) # Private IPs not supported
self.assertEqual(ret[1].public_ips, []) # Node is non-active (no IP)
self.assertEqual(ret[1].private_ips, [])
self.assertEqual(ret[1].id, '28193')
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 9) # 9 instance configurations supported
self.assertEqual(ret[0].id, 'BRZ32.1/2048/60*175')
self.assertEqual(ret[1].id, 'BRZ64.2/4096/60*500*350')
self.assertEqual(ret[2].id, 'COP32.1/2048/60')
self.assertEqual(ret[0].name, 'Bronze 32 bit')
self.assertEqual(ret[0].disk, None)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(len(ret), 21)
self.assertEqual(ret[10].name, "Rational Asset Manager 7.2.0.1")
self.assertEqual(ret[9].id, '10002573')
def test_list_locations(self):
ret = self.driver.list_locations()
self.assertEqual(len(ret), 6)
self.assertEqual(ret[0].id, '41')
self.assertEqual(ret[0].name, 'Raleigh')
self.assertEqual(ret[0].country, 'U.S.A')
def test_create_node(self):
# Test creation of node
IBMMockHttp.type = 'CREATE'
image = NodeImage(id=11, name='Rational Insight', driver=self.driver)
size = NodeSize('LARGE', 'LARGE', None, None, None, None, self.driver)
location = NodeLocation('1', 'POK', 'US', driver=self.driver)
ret = self.driver.create_node(name='RationalInsight4',
image=image,
size=size,
location=location,
publicKey='MyPublicKey',
configurationData={
'insight_admin_password': 'myPassword1',
'db2_admin_password': 'myPassword2',
'report_user_password': 'myPassword3'})
self.assertTrue(isinstance(ret, Node))
self.assertEqual(ret.name, 'RationalInsight4')
# Test creation attempt with invalid location
IBMMockHttp.type = 'CREATE_INVALID'
location = NodeLocation('3', 'DOESNOTEXIST', 'US', driver=self.driver)
try:
ret = self.driver.create_node(name='RationalInsight5',
image=image,
size=size,
location=location,
publicKey='MyPublicKey',
configurationData={
'insight_admin_password': 'myPassword1',
'db2_admin_password': 'myPassword2',
'report_user_password': 'myPassword3'})
except Exception:
e = sys.exc_info()[1]
self.assertEqual(e.args[0], 'Error 412: No DataCenter with id: 3')
else:
self.fail('test should have thrown')
def test_destroy_node(self):
# Delete existent node
nodes = self.driver.list_nodes() # retrieves 3 nodes
self.assertEqual(len(nodes), 3)
IBMMockHttp.type = 'DELETE'
toDelete = nodes[1]
ret = self.driver.destroy_node(toDelete)
self.assertTrue(ret)
# Delete non-existent node
IBMMockHttp.type = 'DELETED'
nodes = self.driver.list_nodes() # retrieves 2 nodes
self.assertEqual(len(nodes), 2)
try:
self.driver.destroy_node(toDelete) # delete non-existent node
except Exception:
e = sys.exc_info()[1]
self.assertEqual(e.args[0], 'Error 404: Invalid Instance ID 28193')
else:
self.fail('test should have thrown')
def test_reboot_node(self):
nodes = self.driver.list_nodes()
IBMMockHttp.type = 'REBOOT'
# Reboot active node
self.assertEqual(len(nodes), 3)
ret = self.driver.reboot_node(nodes[0])
self.assertTrue(ret)
# Reboot inactive node
try:
ret = self.driver.reboot_node(nodes[1])
except Exception:
e = sys.exc_info()[1]
self.assertEqual(
e.args[0], 'Error 412: Instance must be in the Active state')
else:
self.fail('test should have thrown')
def test_list_volumes(self):
ret = self.driver.list_volumes()
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0].name, 'libcloudvol')
self.assertEqual(ret[0].extra['location'], '141')
self.assertEqual(ret[0].size, '2048')
self.assertEqual(ret[0].id, '39281')
def test_attach_volume(self):
vols = self.driver.list_volumes()
nodes = self.driver.list_nodes()
IBMMockHttp.type = 'ATTACH'
ret = self.driver.attach_volume(nodes[0], vols[0])
self.assertTrue(ret)
def test_create_volume(self):
IBMMockHttp.type = 'CREATE'
ret = self.driver.create_volume('256',
'test-volume',
location='141',
format='RAW',
offering_id='20001208')
self.assertEqual(ret.id, '39293')
self.assertEqual(ret.size, '256')
self.assertEqual(ret.name, 'test-volume')
self.assertEqual(ret.extra['location'], '141')
def test_destroy_volume(self):
vols = self.driver.list_volumes()
IBMMockHttp.type = 'DESTROY'
ret = self.driver.destroy_volume(vols[0])
self.assertTrue(ret)
def test_ex_destroy_image(self):
image = self.driver.list_images()
IBMMockHttp.type = 'DESTROY'
ret = self.driver.ex_destroy_image(image[0])
self.assertTrue(ret)
def test_detach_volume(self):
nodes = self.driver.list_nodes()
vols = self.driver.list_volumes()
IBMMockHttp.type = 'DETACH'
ret = self.driver.detach_volume(nodes[0], vols[0])
self.assertTrue(ret)
def test_ex_allocate_address(self):
IBMMockHttp.type = 'ALLOCATE'
ret = self.driver.ex_allocate_address('141', '20001223')
self.assertEqual(ret.id, '292795')
self.assertEqual(ret.state, '0')
self.assertEqual(ret.options['location'], '141')
def test_ex_delete_address(self):
IBMMockHttp.type = 'DELETE'
ret = self.driver.ex_delete_address('292795')
self.assertTrue(ret)
def test_ex_list_addresses(self):
ret = self.driver.ex_list_addresses()
self.assertEqual(ret[0].ip, '170.225.160.218')
self.assertEqual(ret[0].options['location'], '141')
self.assertEqual(ret[0].id, '292795')
self.assertEqual(ret[0].state, '2')
def test_ex_list_storage_offerings(self):
ret = self.driver.ex_list_storage_offerings()
self.assertEqual(ret[0].name, 'Small')
self.assertEqual(ret[0].location, '61')
self.assertEqual(ret[0].id, '20001208')
class IBMMockHttp(MockHttp):
fixtures = ComputeFileFixtures('ibm_sce')
def _computecloud_enterprise_api_rest_20100331_instances(self, method, url, body, headers):
body = self.fixtures.load('instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_DELETED(self, method, url, body, headers):
body = self.fixtures.load('instances_deleted.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED])
def _computecloud_enterprise_api_rest_20100331_offerings_image(self, method, url, body, headers):
body = self.fixtures.load('images.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_locations(self, method, url, body, headers):
body = self.fixtures.load('locations.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_REBOOT(self, method, url, body, headers):
body = self.fixtures.load('reboot_active.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_28193_REBOOT(self, method, url, body, headers):
return (412, 'Error 412: Instance must be in the Active state', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETE(self, method, url, body, headers):
body = self.fixtures.load('delete.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETED(self, method, url, body, headers):
return (404, 'Error 404: Invalid Instance ID 28193', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_instances_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_CREATE_INVALID(self, method, url, body, headers):
return (412, 'Error 412: No DataCenter with id: 3', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_storage(self, method, url, body, headers):
body = self.fixtures.load('list_volumes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_ATTACH(self, method, url, body, headers):
body = self.fixtures.load('attach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_storage_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_storage_39281_DESTROY(self, method, url, body, headers):
body = self.fixtures.load('destroy_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_offerings_image_2_DESTROY(self, method, url, body, headers):
body = self.fixtures.load('destroy_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_DETACH(self, method, url, body, headers):
body = self.fixtures.load('detach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses_ALLOCATE(self, method, url, body, headers):
body = self.fixtures.load('allocate_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses_292795_DELETE(self, method, url, body, headers):
body = self.fixtures.load('delete_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses(self, method, url, body, headers):
body = self.fixtures.load('list_addresses.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_offerings_storage(self, method, url, body, headers):
body = self.fixtures.load('list_storage_offerings.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# This is only to accommodate the response tests built into test\__init__.py
def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers):
if method == 'DELETE':
body = self.fixtures.load('delete.xml')
else:
body = self.fixtures.load('reboot_active.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
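# A hedged note on the mock routing above (libcloud's MockHttp convention as
# exercised here): the request path is mapped to a handler name by replacing
# path separators with underscores, and the current IBMMockHttp.type (e.g.
# 'CREATE') is appended as a suffix, so a POST to
# /computecloud/enterprise/api/rest/20100331/instances with type 'CREATE'
# dispatches to _computecloud_enterprise_api_rest_20100331_instances_CREATE.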
if __name__ == '__main__':
sys.exit(unittest.main())
|
apache-2.0
|
terryyin/linkchecker
|
third_party/dnspython/tests/message.py
|
68
|
5594
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import os
import unittest
import dns.exception
import dns.message
query_text = """id 1234
opcode QUERY
rcode NOERROR
flags RD
edns 0
eflags DO
payload 4096
;QUESTION
wwww.dnspython.org. IN A
;ANSWER
;AUTHORITY
;ADDITIONAL"""
goodhex = '04d201000001000000000001047777777709646e73707974686f6e' \
'036f726700000100010000291000000080000000'
goodwire = goodhex.decode('hex_codec')
answer_text = """id 1234
opcode QUERY
rcode NOERROR
flags QR AA RD
;QUESTION
dnspython.org. IN SOA
;ANSWER
dnspython.org. 3600 IN SOA woof.dnspython.org. hostmaster.dnspython.org. 2003052700 3600 1800 604800 3600
;AUTHORITY
dnspython.org. 3600 IN NS ns1.staff.nominum.org.
dnspython.org. 3600 IN NS ns2.staff.nominum.org.
dnspython.org. 3600 IN NS woof.play-bow.org.
;ADDITIONAL
woof.play-bow.org. 3600 IN A 204.152.186.150
"""
goodhex2 = '04d2 8500 0001 0001 0003 0001' \
'09646e73707974686f6e036f726700 0006 0001' \
'c00c 0006 0001 00000e10 0028 ' \
'04776f6f66c00c 0a686f73746d6173746572c00c' \
'7764289c 00000e10 00000708 00093a80 00000e10' \
'c00c 0002 0001 00000e10 0014' \
'036e7331057374616666076e6f6d696e756dc016' \
'c00c 0002 0001 00000e10 0006 036e7332c063' \
'c00c 0002 0001 00000e10 0010 04776f6f6608706c61792d626f77c016' \
'c091 0001 0001 00000e10 0004 cc98ba96'
goodwire2 = goodhex2.replace(' ', '').decode('hex_codec')
query_text_2 = """id 1234
opcode QUERY
rcode 4095
flags RD
edns 0
eflags DO
payload 4096
;QUESTION
wwww.dnspython.org. IN A
;ANSWER
;AUTHORITY
;ADDITIONAL"""
goodhex3 = '04d2010f0001000000000001047777777709646e73707974686f6e' \
'036f726700000100010000291000ff0080000000'
goodwire3 = goodhex3.decode('hex_codec')
class MessageTestCase(unittest.TestCase):
def test_comparison_eq1(self):
q1 = dns.message.from_text(query_text)
q2 = dns.message.from_text(query_text)
self.failUnless(q1 == q2)
def test_comparison_ne1(self):
q1 = dns.message.from_text(query_text)
q2 = dns.message.from_text(query_text)
q2.id = 10
self.failUnless(q1 != q2)
def test_comparison_ne2(self):
q1 = dns.message.from_text(query_text)
q2 = dns.message.from_text(query_text)
q2.question = []
self.failUnless(q1 != q2)
def test_comparison_ne3(self):
q1 = dns.message.from_text(query_text)
self.failUnless(q1 != 1)
def test_EDNS_to_wire1(self):
q = dns.message.from_text(query_text)
w = q.to_wire()
self.failUnless(w == goodwire)
def test_EDNS_from_wire1(self):
m = dns.message.from_wire(goodwire)
self.failUnless(str(m) == query_text)
def test_EDNS_to_wire2(self):
q = dns.message.from_text(query_text_2)
w = q.to_wire()
self.failUnless(w == goodwire3)
def test_EDNS_from_wire2(self):
m = dns.message.from_wire(goodwire3)
self.failUnless(str(m) == query_text_2)
def test_TooBig(self):
def bad():
q = dns.message.from_text(query_text)
for i in xrange(0, 25):
rrset = dns.rrset.from_text('foo%d.' % i, 3600,
dns.rdataclass.IN,
dns.rdatatype.A,
'10.0.0.%d' % i)
q.additional.append(rrset)
w = q.to_wire(max_size=512)
self.failUnlessRaises(dns.exception.TooBig, bad)
def test_answer1(self):
a = dns.message.from_text(answer_text)
wire = a.to_wire(want_shuffle=False)
self.failUnless(wire == goodwire2)
def test_TrailingJunk(self):
def bad():
badwire = goodwire + '\x00'
m = dns.message.from_wire(badwire)
self.failUnlessRaises(dns.message.TrailingJunk, bad)
def test_ShortHeader(self):
def bad():
badwire = '\x00' * 11
m = dns.message.from_wire(badwire)
self.failUnlessRaises(dns.message.ShortHeader, bad)
def test_RespondingToResponse(self):
def bad():
q = dns.message.make_query('foo', 'A')
r1 = dns.message.make_response(q)
r2 = dns.message.make_response(r1)
self.failUnlessRaises(dns.exception.FormError, bad)
def test_ExtendedRcodeSetting(self):
m = dns.message.make_query('foo', 'A')
m.set_rcode(4095)
self.failUnless(m.rcode() == 4095)
m.set_rcode(2)
self.failUnless(m.rcode() == 2)
def test_EDNSVersionCoherence(self):
m = dns.message.make_query('foo', 'A')
m.use_edns(1)
self.failUnless((m.ednsflags >> 16) & 0xFF == 1)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
moling3650/mblog
|
www/app/frame/__init__.py
|
1
|
4236
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-05-05 22:14:44
# @Author : moling ([email protected])
# @Link : #
# @Version : 0.1
import asyncio
import functools
import inspect
import logging
import os
from aiohttp import web
from .errors import APIError
# Factory pattern: build decorators for GET, POST and other request methods
def request(path, *, method):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = method
wrapper.__route__ = path
return wrapper
return decorator
get = functools.partial(request, method='GET')
post = functools.partial(request, method='POST')
put = functools.partial(request, method='PUT')
delete = functools.partial(request, method='DELETE')
# RequestHandler inspects a URL handler function to work out which parameters
# it expects, and pulls the required values out of the request. A URL handler
# is not necessarily a coroutine, so RequestHandler() is used to wrap it:
# it calls the handler and converts the result into a web.Response object,
# which is exactly what the aiohttp framework requires:
class RequestHandler(object):  # a request-handling class
def __init__(self, func):
self._func = asyncio.coroutine(func)
    async def __call__(self, request):  # any class defining __call__() can have its instances called directly
        # Get the handler's parameter signature
required_args = inspect.signature(self._func).parameters
logging.info('required args: %s' % required_args)
        # Collect values passed via GET or POST whose names appear in the parameter list
kw = {arg: value for arg, value in request.__data__.items() if arg in required_args}
        # Add values from match_info, e.g. the {id} captured by @get('/blog/{id}')
kw.update(request.match_info)
        # If the handler declares a 'request' parameter, pass the request in as well
if 'request' in required_args:
kw['request'] = request
        # Check the parameter list for missing arguments
for key, arg in required_args.items():
            # The 'request' parameter must not be a variadic argument
if key == 'request' and arg.kind in (arg.VAR_POSITIONAL, arg.VAR_KEYWORD):
return web.HTTPBadRequest(text='request parameter cannot be the var argument.')
            # Variadic parameters (*args / **kwargs) may be omitted; all others must have a value
if arg.kind not in (arg.VAR_POSITIONAL, arg.VAR_KEYWORD):
                # Report an error if the parameter has no default and no value was passed
if arg.default == arg.empty and arg.name not in kw:
return web.HTTPBadRequest(text='Missing argument: %s' % arg.name)
logging.info('call with args: %s' % kw)
try:
return await self._func(**kw)
except APIError as e:
return dict(error=e.error, data=e.data, message=e.message)
# Register all routes defined in a module
def add_routes(app, module_name):
try:
mod = __import__(module_name, fromlist=['get_submodule'])
except ImportError as e:
raise e
    # Walk the module's attributes looking for handler functions.
    # Handlers are decorated with @get or @post, so they carry
    # '__method__' and '__route__' attributes.
for attr in dir(mod):
        # Skip names starting with '_'; our handlers never do
if attr.startswith('_'):
continue
        # Fetch the attribute or method
func = getattr(mod, attr)
        # Keep only callables carrying both __method__ and __route__
if callable(func) and hasattr(func, '__method__') and hasattr(func, '__route__'):
args = ', '.join(inspect.signature(func).parameters.keys())
logging.info('add route %s %s => %s(%s)' % (func.__method__, func.__route__, func.__name__, args))
app.router.add_route(func.__method__, func.__route__, RequestHandler(func))
# Register the path of the static files directory
def add_static(app):
path = os.path.join(os.path.dirname(__path__[0]), 'static')
app.router.add_static('/static/', path)
logging.info('add static %s => %s' % ('/static/', path))
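# A hedged usage sketch (the handler below is illustrative): a handler
# declared with the decorators above and wired in by hand, mirroring what
# add_routes() does for every handler in a module.
def _example_register(app):
    @get('/blog/{id}')
    def blog(id):
        return web.Response(text='blog %s' % id)
    app.router.add_route(blog.__method__, blog.__route__, RequestHandler(blog))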
|
mit
|
elggem/tensorflow_node
|
src/tensorflow_node/tests/test_inputlayer.py
|
2
|
3115
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging as log
import tensorflow as tf
import numpy as np
from tensorflow_node import AutoEncoderNode
from tensorflow_node import SummaryWriter
from tensorflow_node import OpenCVInputLayer
# This tests the cropping of the input layer.
class InputLayerTest(tf.test.TestCase):
def testInputLayer(self):
with self.test_session():
inputlayer = OpenCVInputLayer(output_size=(16, 16), batch_size=250)
data_a = np.floor(np.random.rand(250, 8, 8, 1) * 100)
data_b = np.floor(np.random.rand(250, 8, 8, 1) * 100)
data_c = np.floor(np.random.rand(250, 8, 8, 1) * 100)
data_d = np.floor(np.random.rand(250, 8, 8, 1) * 100)
data_ab = np.concatenate([data_a, data_b], axis=1)
data_cd = np.concatenate([data_c, data_d], axis=1)
data_ac = np.concatenate([data_a, data_c], axis=2)
data_bd = np.concatenate([data_b, data_d], axis=2)
data = np.concatenate([data_ab, data_cd], axis=2)
tensor_a = inputlayer.get_tensor_for_region([0, 0, 8, 8])
tensor_b = inputlayer.get_tensor_for_region([8, 0, 8, 8])
tensor_c = inputlayer.get_tensor_for_region([0, 8, 8, 8])
tensor_d = inputlayer.get_tensor_for_region([8, 8, 8, 8])
tensor_ab = inputlayer.get_tensor_for_region([0, 0, 16, 8])
tensor_cd = inputlayer.get_tensor_for_region([0, 8, 16, 8])
tensor_ac = inputlayer.get_tensor_for_region([0, 0, 8, 16])
tensor_bd = inputlayer.get_tensor_for_region([8, 0, 8, 16])
feed_dict = {inputlayer.name + "/input:0": data}
return_a = tensor_a.eval(feed_dict=feed_dict)
return_b = tensor_b.eval(feed_dict=feed_dict)
return_c = tensor_c.eval(feed_dict=feed_dict)
return_d = tensor_d.eval(feed_dict=feed_dict)
return_ab = tensor_ab.eval(feed_dict=feed_dict)
return_cd = tensor_cd.eval(feed_dict=feed_dict)
return_ac = tensor_ac.eval(feed_dict=feed_dict)
return_bd = tensor_bd.eval(feed_dict=feed_dict)
reshaped_a = return_a.reshape(data_a.shape)
reshaped_b = return_b.reshape(data_b.shape)
reshaped_c = return_c.reshape(data_c.shape)
reshaped_d = return_d.reshape(data_d.shape)
reshaped_ab = return_ab.reshape(data_ab.shape)
reshaped_cd = return_cd.reshape(data_cd.shape)
reshaped_ac = return_ac.reshape(data_ac.shape)
reshaped_bd = return_bd.reshape(data_bd.shape)
# compare elementwise...
assert((data_a == reshaped_a).all())
assert((data_b == reshaped_b).all())
assert((data_c == reshaped_c).all())
assert((data_d == reshaped_d).all())
assert((data_ab == reshaped_ab).all())
assert((data_cd == reshaped_cd).all())
assert((data_ac == reshaped_ac).all())
assert((data_bd == reshaped_bd).all())
if __name__ == '__main__':
tf.test.main()
|
unlicense
|
papedaniel/oioioi
|
oioioi/oi/controllers.py
|
1
|
11036
|
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from oioioi.base.utils.redirect import safe_redirect
from oioioi.contests.controllers import PublicContestRegistrationController, \
PastRoundsHiddenContestControllerMixin
from oioioi.contests.models import Submission, SubmissionReport
from oioioi.contests.utils import is_contest_admin, is_contest_observer, \
can_see_personal_data
from oioioi.programs.controllers import ProgrammingContestController
from oioioi.participants.controllers import ParticipantsController
from oioioi.participants.models import Participant
from oioioi.participants.utils import is_participant
from oioioi.oi.models import OIRegistration, OIOnsiteRegistration
from oioioi.spliteval.controllers import SplitEvalContestControllerMixin
from oioioi.scoresreveal.utils import is_revealed
class OIRegistrationController(ParticipantsController):
@property
def form_class(self):
from oioioi.oi.forms import OIRegistrationForm
return OIRegistrationForm
@property
def participant_admin(self):
from oioioi.oi.admin import OIRegistrationParticipantAdmin
return OIRegistrationParticipantAdmin
def anonymous_can_enter_contest(self):
return True
def can_enter_contest(self, request):
return True
def can_register(self, request):
return True
def can_unregister(self, request, participant):
return False
def registration_view(self, request):
participant = self._get_participant_for_form(request)
if 'oi_oiregistrationformdata' in request.session:
# pylint: disable=not-callable
form = self.form_class(request.session[
'oi_oiregistrationformdata'])
del request.session['oi_oiregistrationformdata']
else:
form = self.get_form(request, participant)
if request.method == 'POST':
if '_add_school' in request.POST:
data = request.POST.copy()
data.pop('_add_school', None)
data.pop('csrfmiddlewaretoken', None)
request.session['oi_oiregistrationformdata'] = data
return redirect('add_school')
elif form.is_valid(): # pylint: disable=maybe-no-member
participant, created = Participant.objects \
.get_or_create(contest=self.contest, user=request.user)
self.handle_validated_form(request, form, participant)
if 'next' in request.GET:
return safe_redirect(request, request.GET['next'])
else:
return redirect('default_contest_view',
contest_id=self.contest.id)
context = {'form': form, 'participant': participant}
return TemplateResponse(request, self.registration_template, context)
def get_contest_participant_info_list(self, request, user):
prev = super(OIRegistrationController, self) \
.get_contest_participant_info_list(request, user)
if can_see_personal_data(request):
sensitive_info = OIRegistration.objects.filter(
participant__user=user,
participant__contest=request.contest)
if sensitive_info.exists():
context = {'model': sensitive_info[0]}
rendered_sensitive_info = render_to_string(
'oi/sensitive_participant_info.html',
context_instance=RequestContext(request, context))
prev.append((2, rendered_sensitive_info))
return prev
class OIContestController(ProgrammingContestController):
description = _("Polish Olympiad in Informatics - Online")
create_forum = True
def fill_evaluation_environ(self, environ, submission):
super(OIContestController, self) \
.fill_evaluation_environ(environ, submission)
environ['group_scorer'] = 'oioioi.programs.utils.min_group_scorer'
environ['test_scorer'] = \
'oioioi.programs.utils.threshold_linear_test_scorer'
def registration_controller(self):
return OIRegistrationController(self.contest)
def can_submit(self, request, problem_instance, check_round_times=True):
if request.user.is_anonymous():
return False
if request.user.has_perm('contests.contest_admin', self.contest):
return True
if not is_participant(request):
return False
return super(OIContestController, self) \
.can_submit(request, problem_instance, check_round_times)
def can_see_stats(self, request):
return is_contest_admin(request) or is_contest_observer(request)
def should_confirm_submission_receipt(self, request, submission):
return submission.kind == 'NORMAL' and request.user == submission.user
def update_user_result_for_problem(self, result):
try:
latest_submission = Submission.objects \
.filter(problem_instance=result.problem_instance) \
.filter(user=result.user) \
.filter(score__isnull=False) \
.exclude(status='CE') \
.filter(kind='NORMAL') \
.latest()
try:
report = SubmissionReport.objects.get(
submission=latest_submission, status='ACTIVE',
kind='NORMAL')
except SubmissionReport.DoesNotExist:
report = None
result.score = latest_submission.score
result.status = latest_submission.status
result.submission_report = report
except Submission.DoesNotExist:
result.score = None
result.status = None
result.submission_report = None
def can_see_ranking(self, request):
return is_contest_admin(request) or is_contest_observer(request)
def default_contestlogo_url(self):
return '%(url)soi/logo.png' % {'url': settings.STATIC_URL}
def default_contesticons_urls(self):
return ['%(url)simages/menu/menu-icon-%(i)d.png' %
{'url': settings.STATIC_URL, 'i': i} for i in range(1, 4)]
OIContestController.mix_in(SplitEvalContestControllerMixin)
class OIOnsiteRegistrationController(ParticipantsController):
@property
def participant_admin(self):
from oioioi.oi.admin import OIOnsiteRegistrationParticipantAdmin
return OIOnsiteRegistrationParticipantAdmin
def get_model_class(self):
return OIOnsiteRegistration
def can_register(self, request):
return False
def can_edit_registration(self, request, participant):
return False
def get_contest_participant_info_list(self, request, user):
prev = super(OIOnsiteRegistrationController, self) \
.get_contest_participant_info_list(request, user)
info = OIOnsiteRegistration.objects.filter(participant__user=user,
participant__contest=request.contest)
if info.exists():
context = {'model': info[0]}
rendered_info = render_to_string('oi/participant_info.html',
context_instance=RequestContext(request, context))
prev.append((98, rendered_info))
return prev
class OIOnsiteContestController(OIContestController):
description = _("Polish Olympiad in Informatics - Onsite")
create_forum = False
def registration_controller(self):
return OIOnsiteRegistrationController(self.contest)
def should_confirm_submission_receipt(self, request, submission):
return False
def is_onsite(self):
return True
OIOnsiteContestController.mix_in(PastRoundsHiddenContestControllerMixin)
class BOIOnsiteContestController(OIOnsiteContestController):
description = _("Baltic Olympiad in Informatics")
create_forum = False
def can_see_test_comments(self, request, submissionreport):
submission = submissionreport.submission
return is_contest_admin(request) or \
self.results_visible(request, submission)
def reveal_score(self, request, submission):
super(BOIOnsiteContestController, self).reveal_score(request,
submission)
self.update_user_results(submission.user, submission.problem_instance)
def update_user_result_for_problem(self, result):
try:
submissions = Submission.objects \
.filter(problem_instance=result.problem_instance) \
.filter(user=result.user) \
.filter(score__isnull=False) \
.exclude(status='CE') \
.filter(kind='NORMAL')
chosen_submission = submissions.latest()
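            # Prefer the best revealed submission when it outscores the
            # latest normal one.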
revealed = submissions.filter(revealed__isnull=False)
if revealed:
max_revealed = revealed.order_by('-score')[0]
if max_revealed.score > chosen_submission.score:
chosen_submission = max_revealed
try:
report = SubmissionReport.objects.get(
submission=chosen_submission, status='ACTIVE',
kind='NORMAL')
except SubmissionReport.DoesNotExist:
report = None
result.score = chosen_submission.score
result.status = chosen_submission.status
result.submission_report = report
except Submission.DoesNotExist:
result.score = None
result.status = None
result.submission_report = None
def get_visible_reports_kinds(self, request, submission):
if is_revealed(submission) or \
self.results_visible(request, submission):
return ['USER_OUTS', 'INITIAL', 'NORMAL']
else:
return ['USER_OUTS', 'INITIAL']
def can_print_files(self, request):
return True
def can_see_ranking(self, request):
return True
def default_contestlogo_url(self):
return None
def default_contesticons_urls(self):
return []
def fill_evaluation_environ(self, environ, submission):
super(BOIOnsiteContestController, self) \
.fill_evaluation_environ(environ, submission)
environ['test_scorer'] = 'oioioi.programs.utils.discrete_test_scorer'
class BOIOnlineContestController(BOIOnsiteContestController):
description = _("Baltic Olympiad in Informatics - online")
create_forum = False
def registration_controller(self):
return PublicContestRegistrationController(self.contest)
def is_onsite(self):
return False
def can_see_ranking(self, request):
return True
|
gpl-3.0
|
NeCTAR-RC/neutron
|
neutron/plugins/cisco/common/cisco_exceptions.py
|
43
|
8481
|
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by the Cisco plugin."""
from neutron.common import exceptions
class NetworkSegmentIDNotFound(exceptions.NeutronException):
"""Segmentation ID for network is not found."""
message = _("Segmentation ID for network %(net_id)s is not found.")
class NoMoreNics(exceptions.NeutronException):
"""No more dynamic NICs are available in the system."""
message = _("Unable to complete operation. No more dynamic NICs are "
"available in the system.")
class NetworkVlanBindingAlreadyExists(exceptions.NeutronException):
"""Binding cannot be created, since it already exists."""
message = _("NetworkVlanBinding for %(vlan_id)s and network "
"%(network_id)s already exists.")
class VlanIDNotFound(exceptions.NeutronException):
"""VLAN ID cannot be found."""
message = _("Vlan ID %(vlan_id)s not found.")
class VlanIDOutsidePool(exceptions.NeutronException):
"""VLAN ID cannot be allocated, since it is outside the configured pool."""
message = _("Unable to complete operation. VLAN ID exists outside of the "
"configured network segment range.")
class VlanIDNotAvailable(exceptions.NeutronException):
"""No VLAN ID available."""
message = _("No Vlan ID available.")
class QosNotFound(exceptions.NeutronException):
"""QoS level with this ID cannot be found."""
message = _("QoS level %(qos_id)s could not be found "
"for tenant %(tenant_id)s.")
class QosNameAlreadyExists(exceptions.NeutronException):
"""QoS Name already exists."""
message = _("QoS level with name %(qos_name)s already exists "
"for tenant %(tenant_id)s.")
class CredentialNotFound(exceptions.NeutronException):
"""Credential with this ID cannot be found."""
message = _("Credential %(credential_id)s could not be found.")
class CredentialNameNotFound(exceptions.NeutronException):
"""Credential Name could not be found."""
message = _("Credential %(credential_name)s could not be found.")
class CredentialAlreadyExists(exceptions.NeutronException):
"""Credential already exists."""
message = _("Credential %(credential_name)s already exists.")
class ProviderNetworkExists(exceptions.NeutronException):
"""Provider network already exists."""
message = _("Provider network %s already exists")
class NexusComputeHostNotConfigured(exceptions.NeutronException):
"""Connection to compute host is not configured."""
message = _("Connection to %(host)s is not configured.")
class NexusConnectFailed(exceptions.NeutronException):
"""Failed to connect to Nexus switch."""
message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.")
class NexusConfigFailed(exceptions.NeutronException):
"""Failed to configure Nexus switch."""
message = _("Failed to configure Nexus: %(config)s. Reason: %(exc)s.")
class NexusPortBindingNotFound(exceptions.NeutronException):
"""NexusPort Binding is not present."""
message = _("Nexus Port Binding (%(filters)s) is not present.")
def __init__(self, **kwargs):
filters = ','.join('%s=%s' % i for i in kwargs.items())
super(NexusPortBindingNotFound, self).__init__(filters=filters)
class NoNexusSviSwitch(exceptions.NeutronException):
"""No usable nexus switch found."""
message = _("No usable Nexus switch found to create SVI interface.")
class PortVnicBindingAlreadyExists(exceptions.NeutronException):
"""PortVnic Binding already exists."""
message = _("PortVnic Binding %(port_id)s already exists.")
class PortVnicNotFound(exceptions.NeutronException):
"""PortVnic Binding is not present."""
message = _("PortVnic Binding %(port_id)s is not present.")
class SubnetNotSpecified(exceptions.NeutronException):
"""Subnet id not specified."""
message = _("No subnet_id specified for router gateway.")
class SubnetInterfacePresent(exceptions.NeutronException):
"""Subnet SVI interface already exists."""
message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.")
class PortIdForNexusSvi(exceptions.NeutronException):
"""Port Id specified for Nexus SVI."""
message = _('Nexus hardware router gateway only uses Subnet Ids.')
class InvalidDetach(exceptions.NeutronException):
message = _("Unable to unplug the attachment %(att_id)s from port "
"%(port_id)s for network %(net_id)s. The attachment "
"%(att_id)s does not exist.")
class PolicyProfileAlreadyExists(exceptions.NeutronException):
"""Policy Profile cannot be created since it already exists."""
message = _("Policy Profile %(profile_id)s "
"already exists.")
class PolicyProfileIdNotFound(exceptions.NotFound):
"""Policy Profile with the given UUID cannot be found."""
message = _("Policy Profile %(profile_id)s could not be found.")
class PolicyProfileNameNotFound(exceptions.NotFound):
"""Policy Profile with the given name cannot be found."""
message = _("Policy Profile %(profile_name)s could not be found.")
class NetworkProfileAlreadyExists(exceptions.NeutronException):
"""Network Profile cannot be created since it already exists."""
message = _("Network Profile %(profile_id)s "
"already exists.")
class NetworkProfileNotFound(exceptions.NotFound):
"""Network Profile with the given UUID/name cannot be found."""
message = _("Network Profile %(profile)s could not be found.")
class NetworkProfileInUse(exceptions.InUse):
"""Network Profile with the given UUID is in use."""
message = _("One or more network segments belonging to network "
"profile %(profile)s is in use.")
class NoMoreNetworkSegments(exceptions.NoNetworkAvailable):
"""Network segments exhausted for the given network profile."""
message = _("No more segments available in network segment pool "
"%(network_profile_name)s.")
class VMNetworkNotFound(exceptions.NotFound):
"""VM Network with the given name cannot be found."""
message = _("VM Network %(name)s could not be found.")
class VxlanIDInUse(exceptions.InUse):
"""VXLAN ID is in use."""
message = _("Unable to create the network. "
"The VXLAN ID %(vxlan_id)s is in use.")
class VxlanIDNotFound(exceptions.NotFound):
"""VXLAN ID cannot be found."""
message = _("Vxlan ID %(vxlan_id)s not found.")
class VxlanIDOutsidePool(exceptions.NeutronException):
"""VXLAN ID cannot be allocated, as it is outside the configured pool."""
message = _("Unable to complete operation. VXLAN ID exists outside of the "
"configured network segment range.")
class VSMConnectionFailed(exceptions.ServiceUnavailable):
"""Connection to VSM failed."""
message = _("Connection to VSM failed: %(reason)s.")
class VSMError(exceptions.NeutronException):
"""Error has occurred on the VSM."""
message = _("Internal VSM Error: %(reason)s.")
class NetworkBindingNotFound(exceptions.NotFound):
"""Network Binding for network cannot be found."""
message = _("Network Binding for network %(network_id)s could "
"not be found.")
class PortBindingNotFound(exceptions.NotFound):
"""Port Binding for port cannot be found."""
message = _("Port Binding for port %(port_id)s could "
"not be found.")
class ProfileTenantBindingNotFound(exceptions.NotFound):
"""Profile to Tenant binding for given profile ID cannot be found."""
message = _("Profile-Tenant binding for profile %(profile_id)s could "
"not be found.")
class NoClusterFound(exceptions.NotFound):
"""No service cluster found to perform multi-segment bridging."""
message = _("No service cluster found to perform multi-segment bridging.")
|
apache-2.0
|
harshita-gupta/Harvard-FRSEM-Catalog-2016-17
|
flask/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py
|
485
|
1917
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = None
orig_stderr = None
wrapped_stdout = None
wrapped_stderr = None
atexit_done = False
def reset_all():
if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
global orig_stdout, orig_stderr
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if sys.stdout is None:
wrapped_stdout = None
else:
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
if sys.stderr is None:
wrapped_stderr = None
else:
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
if orig_stdout is not None:
sys.stdout = orig_stdout
if orig_stderr is not None:
sys.stderr = orig_stderr
@contextlib.contextmanager
def colorama_text(*args, **kwargs):
init(*args, **kwargs)
try:
yield
finally:
deinit()
def reinit():
if wrapped_stdout is not None:
sys.stdout = wrapped_stdout
if wrapped_stderr is not None:
sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream,
convert=convert, strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
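# Typical lifecycle (a sketch): init() wraps sys.stdout/sys.stderr so ANSI
# escape sequences render on Windows, deinit() restores the original
# streams, and reinit() cheaply re-applies the wrapping after a deinit().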
|
mit
|
ikoula/cloudstack
|
test/integration/smoke/test_reset_vm_on_reboot.py
|
3
|
5578
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for reset Vm on reboot
"""
#Import Local Modules
import marvin
from marvin.codes import FAILED
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class TestResetVmOnReboot(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestResetVmOnReboot, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
domain = get_domain(cls.apiclient)
zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.services['mode'] = zone.networktype
template = get_template(
cls.apiclient,
zone.id,
cls.services["ostype"]
)
if template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
        # Set zone and template on the VM deployment config
cls.services["small"]["zoneid"] = zone.id
cls.services["small"]["template"] = template.id
# Create account, service offerings, vm.
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["small"],
isvolatile="true"
)
#create a virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.small_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.small_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestResetVmOnReboot, cls).getClsTestClient().getApiClient()
cleanup_resources(cls.apiclient, cls._cleanup)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
#Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
return
@attr(hypervisor="xenserver")
@attr(tags=["advanced", "basic"], required_hardware="false")
def test_01_reset_vm_on_reboot(self):
#TODO: SIMENH: add new test to check volume contents
"""Test reset virtual machine on reboot
"""
        # Validate the following:
        # create a VM and list its ROOT volume, then reboot the VM and
        # check that the root volume differs from the one before the reboot.
volumelist_before_reboot = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertNotEqual(
volumelist_before_reboot,
None,
"Check if volume is in listvolumes"
)
volume_before_reboot = volumelist_before_reboot[0]
self.debug("Rebooting vm %s " % (self.virtual_machine.id))
cmd = rebootVirtualMachine.rebootVirtualMachineCmd()
cmd.id = self.virtual_machine.id
self.apiclient.rebootVirtualMachine(cmd)
volumelist_after_reboot = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertNotEqual(
volumelist_after_reboot,
None,
"Check if volume is in listvolumes"
)
volume_after_reboot = volumelist_after_reboot[0]
self.assertNotEqual(
volume_after_reboot.id,
volume_before_reboot.id,
"Check whether volumes are different before and after reboot"
)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_vm_response,
None,
"Check virtual machine is listVirtualMachines"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
'Running',
"Check the state of VM"
)
return
|
gpl-2.0
|
reinout/django
|
tests/fixtures/models.py
|
57
|
3234
|
"""
Fixtures.
Fixtures are a way of loading data into the database in bulk. Fixture data
can be stored in any serializable format (including JSON and XML). Fixtures
are identified by name, and are stored in either a directory named 'fixtures'
in the application directory, or in one of the directories named in the
``FIXTURE_DIRS`` setting.
"""
import uuid
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Category(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
class Meta:
ordering = ('-pub_date', 'headline')
class Blog(models.Model):
name = models.CharField(max_length=100)
featured = models.ForeignKey(Article, models.CASCADE, related_name='fixtures_featured_set')
articles = models.ManyToManyField(Article, blank=True,
related_name='fixtures_articles_set')
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=100)
tagged_type = models.ForeignKey(ContentType, models.CASCADE, related_name="fixtures_tag_set")
tagged_id = models.PositiveIntegerField(default=0)
tagged = GenericForeignKey(ct_field='tagged_type', fk_field='tagged_id')
def __str__(self):
return '<%s: %s> tagged "%s"' % (self.tagged.__class__.__name__,
self.tagged, self.name)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
def natural_key(self):
return (self.name,)
class SpyManager(PersonManager):
def get_queryset(self):
return super().get_queryset().filter(cover_blown=False)
class Spy(Person):
objects = SpyManager()
cover_blown = models.BooleanField(default=False)
class ProxySpy(Spy):
class Meta:
proxy = True
class Visa(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
permissions = models.ManyToManyField(Permission, blank=True)
def __str__(self):
return '%s %s' % (self.person.name,
', '.join(p.name for p in self.permissions.all()))
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Person)
def __str__(self):
authors = ' and '.join(a.name for a in self.authors.all())
return '%s by %s' % (self.name, authors) if authors else self.name
class Meta:
ordering = ('name',)
class PrimaryKeyUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
|
bsd-3-clause
|
MihaZelnik/meepleAPI
|
meeple/apps/api/views/mechanic.py
|
1
|
1447
|
from django.conf.urls import url
from apps.api.models import Mechanic
from apps.api.serializers.mechanic import MechanicSerializer, MechanicListSerializer
from apps.api.views.helpers import BaseGameView, BaseExpansionsView, BaseListView, BaseDetailView
class MechanicListView(BaseListView):
"""
Get list of mechanics
"""
queryset = Mechanic.objects.all()
serializer_class = MechanicListSerializer
resource = 'mechanics'
class MechanicDetailView(BaseDetailView):
"""
Get mechanic by ID
"""
queryset = Mechanic.objects.all()
serializer_class = MechanicSerializer
resource = 'mechanics'
class MechanicGamesView(BaseGameView):
"""
Get list of games with this mechanic
"""
klass = Mechanic
resource = 'mechanics'
class MechanicExpansionsView(BaseExpansionsView):
"""
Get list of expansions with this mechanic
"""
klass = Mechanic
resource = 'mechanics'
mechanic_list = MechanicListView.as_view()
mechanic_detail = MechanicDetailView.as_view()
mechanic_games = MechanicGamesView.as_view()
mechanic_expansions = MechanicExpansionsView.as_view()
urlpatterns = [
url(r'^$', mechanic_list, name='mechanic-list'),
url(r'^(?P<pk>[0-9]+)/$', mechanic_detail, name='mechanic-detail'),
url(r'^(?P<pk>[0-9]+)/games/$', mechanic_games, name='mechanic-games'),
    url(r'^(?P<pk>[0-9]+)/expansions/$', mechanic_expansions, name='mechanic-expansions'),
]
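# Resulting endpoints (illustrative; the exact prefix depends on where this
# urlconf is included): /mechanics/, /mechanics/<pk>/,
# /mechanics/<pk>/games/ and /mechanics/<pk>/expansions/.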
|
bsd-3-clause
|
cxxgtxy/tensorflow
|
tensorflow/contrib/distributions/python/ops/inverse_gamma.py
|
64
|
10410
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
"""InverseGamma distribution.
The `InverseGamma` distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
Z = Gamma(alpha) beta**-alpha
```
where:
* `concentration = alpha`,
* `rate = beta`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
```
where `GammaInc` is the [upper incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
  The parameters can be intuited via their relationship to mean and stddev
  (both moments exist only when `concentration > 2`),
  ```none
  concentration = alpha = (mean / stddev)**2 + 2
  rate = beta = mean * ((mean / stddev)**2 + 1)
  ```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
WARNING: This distribution may draw 0-valued samples for small concentration
values. See note in `tf.random_gamma` docstring.
#### Examples
```python
dist = InverseGamma(concentration=3.0, rate=2.0)
dist2 = InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
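  For example, matching a target mean of 2 and stddev of 1 gives
  `concentration = (2 / 1)**2 + 2 = 6` and `rate = 2 * ((2 / 1)**2 + 1) = 10`;
  as a check, `rate / (concentration - 1) = 10 / 5 = 2` recovers the mean.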
"""
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGamma"):
"""Construct InverseGamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[concentration, rate]):
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(
"""Note: See `tf.random_gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
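    # If X ~ Gamma(concentration, rate) then 1/X ~ InverseGamma(concentration,
    # rate), so we draw Gamma samples and take their reciprocals.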
return 1. / random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.igammac(self.concentration, self.rate / x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
+ math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
- ((1. + self.concentration) *
math_ops.digamma(self.concentration)))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is
`rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
otherwise. If `self.allow_nan_stats` is `False`, an exception will be
raised rather than returning `NaN`""")
def _mean(self):
mean = self.rate / (self.concentration - 1.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 1., mean, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype), self.concentration,
message="mean undefined when any concentration <= 1"),
], mean)
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `concentration > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (math_ops.square(self.rate)
/ math_ops.square(self.concentration - 1.)
/ (self.concentration - 2.))
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 2., var, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
constant_op.constant(2., dtype=self.dtype),
self.concentration,
message="variance undefined when any concentration <= 2"),
], var)
@distribution_util.AppendDocstring(
"""The mode of an inverse gamma distribution is `rate / (concentration +
1)`.""")
def _mode(self):
return self.rate / (1. + self.concentration)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(
tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
"""`InverseGamma` with softplus of `concentration` and `rate`."""
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusConcentrationRate"):
parameters = locals()
with ops.name_scope(name, values=[concentration, rate]):
super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
apache-2.0
|
wavelets/hebel
|
hebel/parameter_updaters.py
|
3
|
3771
|
# Copyright (C) 2013 Hannes Bretschneider
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
""" Implements different variants of updating the parameters in SGD,
such as momentum and Nesterov momentum.
"""
from pycuda import gpuarray
from itertools import izip
class ParameterUpdater(object):
def __init__(self, model):
self.model = model
def pre_gradient_update(self, stream=None):
pass
def post_gradient_update(self, gradients, stream=None):
pass
class SimpleSGDUpdate(ParameterUpdater):
def post_gradient_update(self, gradients, batch_size,
learning_parameters,
stream=None):
learning_rate = learning_parameters[0]
multiplier = [-lr_mult * learning_rate / batch_size for lr_mult in
self.model.lr_multiplier]
update = zip(gradients, multiplier)
self.model.update_parameters(update)
class MomentumUpdate(ParameterUpdater):
def __init__(self, model):
self.model = model
self.velocity = [gpuarray.zeros_like(p)
for p in self.model.parameters]
def post_gradient_update(self, gradients, batch_size,
learning_parameters, stream=None):
learning_rate, momentum = learning_parameters
updates = []
for gparam, vparam, lr_multiplier in \
izip(gradients, self.velocity, self.model.lr_multiplier):
vparam._axpbyz(momentum,
gparam, -learning_rate * lr_multiplier / batch_size,
vparam, stream=stream)
updates.append((vparam, 1.))
self.model.update_parameters(updates)
class NesterovMomentumUpdate(MomentumUpdate):
def pre_gradient_update(self):
""" First step of Nesterov momentum method:
take step in direction of accumulated gradient
"""
updates = zip(self.velocity, self.model.n_parameters * [1.])
self.model.update_parameters(updates)
def post_gradient_update(self, gradients, batch_size,
learning_parameters, stream=None):
""" Second step of Nesterov momentum method:
take step in direction of new gradient and update velocity
"""
learning_rate, momentum = learning_parameters
updates = []
for param, gparam, vparam, lr_multiplier in \
izip(self.model.parameters, gradients,
self.velocity, self.model.lr_multiplier):
updates.append(
(gparam, -learning_rate * lr_multiplier / batch_size))
# param -= learning_rate*lr_multiplier/batch_size*gparam
# param._axpbyz(1., gparam, -learning_rate*lr_multiplier/batch_size,
# param, stream=stream)
# vparam = momentum*vparam \
# - learning_rate*lr_multiplier/batch_size*gparam
vparam._axpbyz(momentum, gparam, -learning_rate*lr_multiplier/batch_size,
vparam, stream=stream)
self.model.update_parameters(updates)
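# A minimal usage sketch (hypothetical model/batch objects; it shows the
# required call order only, not an official API):
#
#     updater = NesterovMomentumUpdate(model)
#     for batch in batches:
#         updater.pre_gradient_update()          # step along the velocity
#         gradients = compute_gradients(model, batch)
#         updater.post_gradient_update(gradients, batch_size,
#                                      (learning_rate, momentum))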
|
gpl-2.0
|
IndianHillsLawrence/saferoutes2school
|
simple.py
|
1
|
34186
|
import psycopg2
import pprint
conn = psycopg2.connect("dbname=indianhills user=mdupont")
import sys
#cur2 = conn.cursor()
#cur3 = conn.cursor()
def return_list(sql) :
cur = conn.cursor()
try :
cur.execute(sql)
except Exception as e:
print sql
print e
raise e
ret = []
for x in cur:
ret.append( x[0])
#pprint.pprint( ret)
#print "SQL %s got count %s" % (sql, len(ret))
return ret
def reflect(c):
p = 0
for x in c.description:
print "row[\"%s\"]=s[%s]" % (x.name, p)
p = p + 1
def schools():
schools = conn.cursor()
# first select all the schools
schools.execute("""select
osm_id,
"isced:level",
"addr:housename",
"addr:housenumber",
name,
ST_asText(ST_Centroid(ST_Transform(way,4326))) as the_geom
from
planet_osm_point
where "isced:level" is not null;
""")
school_data = {}
for s in schools:
row = {}
#print s
row["osm_id"]=s[0]
row["isced:level"]=s[1]
row["addr:housename"]=s[2]
row["addr:housenumber"]=s[3]
row["name"]=s[4]
row["geom_text"]=s[5]
school_data[row["name"]]=row
return school_data
def routing_point_for_school(geom_text, osm_id ):
"""
    We find the closest point in the routing matrix for each school.
"""
dist = """
round(
CAST(
ST_Distance_Sphere(
ST_Centroid(the_geom),
ST_GeomFromText('%s')
)
As numeric)
,2)
""" % geom_text
#print "DIST", dist
sql = """SELECT *,
%s As dist_meters
FROM ways_vertices_pgr
order by %s
limit 1;""" % (dist,dist)
cur = conn.cursor()
#print sql
cur.execute(sql)
#reflect(cur)
#if len(cur):
# try:
for s in cur:
row = {}
row["id"]=s[0] # the routing id
row["cnt"]=s[1]
row["chk"]=s[2]
row["ein"]=s[3]
row["eout"]=s[4]
row["the_geom"]=s[5] # geometry of routing point
row["dist_meters"]=s[6] # the distance
return row
def find_route(bid, aid):
cur2 = conn.cursor()
cur2.execute("""
SELECT * FROM pgr_dijkstra(
'SELECT gid AS id, source::integer as source, target::integer as target, length::double precision AS cost, the_geom FROM ways',
%s, %s, false, false)
""" % (aid, bid));
total = 0
legs = []
for r2 in cur2:
leg = {
'row_seq' : r2[0],
'row_id1' : r2[1],
'row_id2' : r2[2],
'row_cost': r2[3]
}
legs.append(leg)
total = total + float(leg['row_cost'])
return {
'from' : aid,
'to' : bid,
'total' : total,
'legs': legs
}
def add_route_leg(
cur3,
bid, # the school
aid, # the house
row_id1, # source point
row_id2, # target point
row_cost # cost
):
cmd = """insert into school_route (
from_vertex,
to_vertex,
sourcepoint,
target_point ,
leg_cost) values (
%s,
%s,
%s,
%s,
%s)""" % (
bid, # the school
aid, # the house
row_id1, # source point
row_id2, # target point
row_cost # cost
)
#print cmd
cur3.execute(cmd )
sys.stdout.write('.')
conn.commit()
def missing_points():
"""
    get a list of routing points that are not yet the destination of any school route
"""
return return_list("""
select
distinct(a.id)
from
ways_vertices_pgr a
left outer join
school_route b
on
a.id=b.to_vertex
where
b.to_vertex is null
""")
def process_school(bid, osm_id, limit ):
# now lets find all the points not associated with school
cur = conn.cursor()
print "process_school(%s,%s,%s )" % (bid, osm_id, limit)
#
# def process_school_first(bid, osm_id, limit ):
# cur = conn.cursor()
# print "process_school(%s,%s,%s )" % (bid, osm_id, limit)
# # just calculate to each routing point, dont worry about places for now, we can add them later
# sql = """
# select
# distinct(g2.id)
# from
# ways_vertices_pgr g1,
# ways_vertices_pgr g2
# where
# ST_DWithin(g1.the_geom, g2.the_geom, %s)
# and g1.id=%s
# and g1.id != g2.id;
# """ % (limit + 0.001, bid)
# print sql
# cur.execute(sql)
# # -- where the node is closer to this school than other schools
# for record in cur:
# #print record
# aid = record[0]
# # pass
# print aid
# # now get the route to that
# add_route(cur3, bid, osm_id, aid )
def create_school_route():
cur = conn.cursor()
#cur.execute("""drop table school_route;""")
cur.execute("""delete from table school_route;""")
cur.execute("""
Create table if not exists school_route(
from_vertex bigint,
to_vertex bigint,
sourcepoint bigint,
target_point bigint,
leg_cost double precision);
""")
def create_ways():
# now create at table for postgis an include the geometry
# we could include that directly in the school route table as well
cur = conn.cursor()
#cur.execute(""" drop table school_ways; """)
cur.execute("""
create table if not exists school_ways as select c.leg_cost, a.* from school_route c, ways a where c.target_point = a.gid;
""")
# recreate the table holding the routes
#create_school_route()
def all_traversed_points():
"""
all the traversed points in the routes so far, so we will skip them as well.
"""
return return_list( """
select
distinct(r.sourcepoint)
from
school_route r
""")
def Decimal(x):
    # identity shim so the pprint'ed snapshot below, which embeds
    # Decimal('...') literals, evaluates as plain Python
    return x
def process_schools():
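    # NOTE: this early return hands back a cached snapshot of the school and
    # routing-point data; the live recomputation further down is skipped.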
return {
'rp': {2358L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2281896409873 38.9764826248688)',
'isced:level': '1',
'name': 'Woodlawn Elementary School',
'osm_id': 3112669826L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('63.35'),
'ein': None,
'eout': None,
'id': 2358L,
'the_geom': '0101000020E61000001CD71AA5A6CE57C0FB00497DFE7C4340'}},
2505L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.258446516724 38.9645600189159)',
'isced:level': '1',
'name': 'Hillcrest Elementary School',
'osm_id': 3112669817L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('52.39'),
'ein': None,
'eout': None,
'id': 2505L,
'the_geom': '0101000020E6100000F390291F82D057C062484E266E7B4340'}},
7323L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2702877494731 38.9671719054717)',
'isced:level': '1',
'name': 'Sunset Hill Elementary School',
'osm_id': 3112669825L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('89.12'),
'ein': None,
'eout': None,
'id': 7323L,
'the_geom': '0101000020E610000027A5A0DB4BD157C09C340D8AE67B4340'}},
16571L: {'addr:housename': None,
'addr:housenumber': '2201',
'geom_text': 'POINT(-95.2561886012573 38.9440285441441)',
'isced:level': '1',
'name': 'Schwegler Elementary School',
'osm_id': 3112669823L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('47.98'),
'ein': None,
'eout': None,
'id': 16571L,
'the_geom': '0101000020E6100000B69F8CF161D057C010864BD8C8784340'}},
16932L: {'addr:housename': None,
'addr:housenumber': '2521',
'geom_text': 'POINT(-95.2992881516219 38.9365535258204)',
'isced:level': '1',
'name': 'Sunflower Elementary School',
'osm_id': 3112669824L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('114.79'),
'ein': None,
'eout': None,
'id': 16932L,
'the_geom': '0101000020E6100000DEFFC70913D357C00AD80E46EC774340'}},
17269L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.3016403901934 38.9632742606496)',
'isced:level': '1',
'name': 'Quail Run Elementary School',
'osm_id': 3112669822L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('62.36'),
'ein': None,
'eout': None,
'id': 17269L,
'the_geom': '0101000020E6100000B73DE6A848D357C0AC30C73C3C7B4340'}},
18110L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2110256206852 38.9345921838266)',
'isced:level': '1',
'name': 'Prairie Park Elementary School',
'osm_id': 3112669821L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('48.24'),
'ein': None,
'eout': None,
'id': 18110L,
'the_geom': '0101000020E6100000187CF54C8ACD57C074CBB3379D774340'}},
18433L: {'addr:housename': None,
'addr:housenumber': '936',
'geom_text': 'POINT(-95.2309065955642 38.9663825175286)',
'isced:level': '1',
'name': 'New York Elementary School',
'osm_id': 3112669819L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('69.95'),
'ein': None,
'eout': None,
'id': 18433L,
'the_geom': '0101000020E6100000389AC871CCCE57C06506E055C57B4340'}},
19124L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2374201898578 38.950614288905)',
'isced:level': '1',
'name': 'Cordley Elementary School',
'osm_id': 3112669815L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('35.86'),
'ein': None,
'eout': None,
'id': 19124L,
'the_geom': '0101000020E6100000013851A62CCF57C0A5E83702A7794340'}},
19871L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2711531864179 38.9827085269385)',
'isced:level': '1',
'name': 'Deerfield Elementary School',
'osm_id': 3112669816L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('27.61'),
'ein': None,
'eout': None,
'id': 19871L,
'the_geom': '0101000020E6100000582DFA545FD157C0504FC4C7CC7D4340'}},
21112L: {'addr:housename': None,
'addr:housenumber': '810',
'geom_text': 'POINT(-95.2447015742247 38.9737829460492)',
'isced:level': '1',
'name': 'Pinckney Elementary School',
'osm_id': 3112669820L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('29.05'),
'ein': None,
'eout': None,
'id': 21112L,
'the_geom': '0101000020E61000000A100533A6CF57C0BB4967BB9D7C4340'}},
22265L: {'addr:housename': None,
'addr:housenumber': '1101',
'geom_text': 'POINT(-95.3274340762724 38.9635176820164)',
'isced:level': '1',
'name': 'Langston Hughes Elementary School',
'osm_id': 3112669818L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('133.40'),
'ein': None,
'eout': None,
'id': 22265L,
'the_geom': '0101000020E6100000A6F8533EDFD457C07769C361697B4340'}},
23803L: {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2405040163966 38.9351028409179)',
'isced:level': '1',
'name': 'Broken Arrow Elementary School',
'osm_id': 3112669814L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('27.47'),
'ein': None,
'eout': None,
'id': 23803L,
'the_geom': '0101000020E6100000A4D23E0C63CF57C080D8D2A3A9774340'}}},
'schools': {'Broken Arrow Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2405040163966 38.9351028409179)',
'isced:level': '1',
'name': 'Broken Arrow Elementary School',
'osm_id': 3112669814L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('27.47'),
'ein': None,
'eout': None,
'id': 23803L,
'the_geom': '0101000020E6100000A4D23E0C63CF57C080D8D2A3A9774340'}},
'Cordley Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2374201898578 38.950614288905)',
'isced:level': '1',
'name': 'Cordley Elementary School',
'osm_id': 3112669815L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('35.86'),
'ein': None,
'eout': None,
'id': 19124L,
'the_geom': '0101000020E6100000013851A62CCF57C0A5E83702A7794340'}},
'Deerfield Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2711531864179 38.9827085269385)',
'isced:level': '1',
'name': 'Deerfield Elementary School',
'osm_id': 3112669816L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('27.61'),
'ein': None,
'eout': None,
'id': 19871L,
'the_geom': '0101000020E6100000582DFA545FD157C0504FC4C7CC7D4340'}},
'Hillcrest Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.258446516724 38.9645600189159)',
'isced:level': '1',
'name': 'Hillcrest Elementary School',
'osm_id': 3112669817L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('52.39'),
'ein': None,
'eout': None,
'id': 2505L,
'the_geom': '0101000020E6100000F390291F82D057C062484E266E7B4340'}},
'Langston Hughes Elementary School': {'addr:housename': None,
'addr:housenumber': '1101',
'geom_text': 'POINT(-95.3274340762724 38.9635176820164)',
'isced:level': '1',
'name': 'Langston Hughes Elementary School',
'osm_id': 3112669818L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('133.40'),
'ein': None,
'eout': None,
'id': 22265L,
'the_geom': '0101000020E6100000A6F8533EDFD457C07769C361697B4340'}},
'New York Elementary School': {'addr:housename': None,
'addr:housenumber': '936',
'geom_text': 'POINT(-95.2309065955642 38.9663825175286)',
'isced:level': '1',
'name': 'New York Elementary School',
'osm_id': 3112669819L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('69.95'),
'ein': None,
'eout': None,
'id': 18433L,
'the_geom': '0101000020E6100000389AC871CCCE57C06506E055C57B4340'}},
'Pinckney Elementary School': {'addr:housename': None,
'addr:housenumber': '810',
'geom_text': 'POINT(-95.2447015742247 38.9737829460492)',
'isced:level': '1',
'name': 'Pinckney Elementary School',
'osm_id': 3112669820L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('29.05'),
'ein': None,
'eout': None,
'id': 21112L,
'the_geom': '0101000020E61000000A100533A6CF57C0BB4967BB9D7C4340'}},
'Prairie Park Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2110256206852 38.9345921838266)',
'isced:level': '1',
'name': 'Prairie Park Elementary School',
'osm_id': 3112669821L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('48.24'),
'ein': None,
'eout': None,
'id': 18110L,
'the_geom': '0101000020E6100000187CF54C8ACD57C074CBB3379D774340'}},
'Quail Run Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.3016403901934 38.9632742606496)',
'isced:level': '1',
'name': 'Quail Run Elementary School',
'osm_id': 3112669822L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('62.36'),
'ein': None,
'eout': None,
'id': 17269L,
'the_geom': '0101000020E6100000B73DE6A848D357C0AC30C73C3C7B4340'}},
'Schwegler Elementary School': {'addr:housename': None,
'addr:housenumber': '2201',
'geom_text': 'POINT(-95.2561886012573 38.9440285441441)',
'isced:level': '1',
'name': 'Schwegler Elementary School',
'osm_id': 3112669823L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('47.98'),
'ein': None,
'eout': None,
'id': 16571L,
'the_geom': '0101000020E6100000B69F8CF161D057C010864BD8C8784340'}},
'Sunflower Elementary School': {'addr:housename': None,
'addr:housenumber': '2521',
'geom_text': 'POINT(-95.2992881516219 38.9365535258204)',
'isced:level': '1',
'name': 'Sunflower Elementary School',
'osm_id': 3112669824L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('114.79'),
'ein': None,
'eout': None,
'id': 16932L,
'the_geom': '0101000020E6100000DEFFC70913D357C00AD80E46EC774340'}},
'Sunset Hill Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2702877494731 38.9671719054717)',
'isced:level': '1',
'name': 'Sunset Hill Elementary School',
'osm_id': 3112669825L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('89.12'),
'ein': None,
'eout': None,
'id': 7323L,
'the_geom': '0101000020E610000027A5A0DB4BD157C09C340D8AE67B4340'}},
'Woodlawn Elementary School': {'addr:housename': None,
'addr:housenumber': None,
'geom_text': 'POINT(-95.2281896409873 38.9764826248688)',
'isced:level': '1',
'name': 'Woodlawn Elementary School',
'osm_id': 3112669826L,
'routing_point': {'chk': None,
'cnt': None,
'dist_meters': Decimal('63.35'),
'ein': None,
'eout': None,
'id': 2358L,
'the_geom': '0101000020E61000001CD71AA5A6CE57C0FB00497DFE7C4340'}}}}
srp = {}
school_data = schools()
for s in school_data.keys():
print s
d = school_data[s]
#distance = closest_schools(d['osm_id'])
x = routing_point_for_school(d['geom_text'],d['osm_id'])
d['routing_point'] = x
srp[x['id']]=d
sd= {
'schools' : school_data,
'rp' : srp
}
pprint.pprint(sd)
return sd
def closest_schools_to_point(rpid, srps):
"""
    Select the schools closest to this routing point; we take the five
    nearest so that neighbouring schools overlap a little, but not too much.
"""
# ST_Distance(s.the_geom,rp.the_geom)
return return_list(
"""
select
s.id
from
ways_vertices_pgr rp,
ways_vertices_pgr s
where
rp.id = %s
and
s.id in (%s)
order by
ST_Distance(s.the_geom,rp.the_geom)
limit 5
;
""" % (
rpid,
",".join(str(x) for x in srps['rp'].keys())
)
)
def process_all_points():
"""
    First get all the points that are not yet part of a route.
    Then, for each point, find the N closest schools and calculate a route to each.
    Then pick the shortest of those routes and add it to the system.
    # optional: each point traversed along the way could also be added to
    # the route, so that we don't need to compute it twice.
"""
sd = process_schools()
# we will need to refresh this as well.
used = all_traversed_points()
print used;
missing = missing_points()
cur3 = conn.cursor()
for p in missing :
print "eval p %s" %(p)
if p not in used : # skip the used
sl = closest_schools_to_point(p, sd)
routes = {}
pprint.pprint(sl)
if sl is not None:
mindist = 99999
minroute = None
for s in sl :
# now for the closest schools we will find the shortest route
r = find_route(p,s)
#routes[s]=r
if r['total'] < mindist:
minroute = r
mindist= r['total']
print "p %s, min %s from %s to %s " %(p,mindist, minroute['from'],minroute['to'])
#pprint.pprint( minroute)
#now insert this into the school routes,
for l in minroute['legs'] :
add_route_leg(cur3,
minroute['from'],
minroute['to'],
l['row_id1'],
l['row_id2'],
l['row_cost']
)
#raise Exception()
# main routing
process_all_points()
|
agpl-3.0
|
HardLight/denyhosts
|
DenyHosts/constants.py
|
2
|
1776
|
import sys
#################################################################################
# These files will be created relative to prefs WORK_DIR #
#################################################################################
SECURE_LOG_OFFSET = "offset"
DENIED_TIMESTAMPS = "denied-timestamps"
ABUSIVE_HOSTS_INVALID = "hosts"
ABUSIVE_HOSTS_VALID = "hosts-valid"
ABUSIVE_HOSTS_ROOT = "hosts-root"
ABUSIVE_HOSTS_RESTRICTED = "hosts-restricted"
ABUSED_USERS_INVALID = "users-invalid"
ABUSED_USERS_VALID = "users-valid"
ABUSED_USERS_AND_HOSTS = "users-hosts"
"""
successful logins AFTER invalid
attempts from same host
"""
SUSPICIOUS_LOGINS = "suspicious-logins"
ALLOWED_HOSTS = "allowed-hosts"
ALLOWED_WARNED_HOSTS = "allowed-warned-hosts"
RESTRICTED_USERNAMES = "restricted-usernames"
SYNC_TIMESTAMP = "sync-timestamp"
SYNC_HOSTS = "sync-hosts"
SYNC_HOSTS_TMP = "sync-hosts.tmp"
SYNC_RECEIVED_HOSTS = "sync-received"
PURGE_HISTORY = "purge-history"
#################################################################################
# Miscellaneous constants #
#################################################################################
CONFIG_FILE = "/etc/denyhosts.conf"
DENY_DELIMITER = "# DenyHosts:"
ENTRY_DELIMITER = " | "
TIME_SPEC_LOOKUP = {
's': 1, # s
'm': 60, # minute
'h': 3600, # hour
'd': 86400, # day
'w': 604800, # week
'y': 31536000, # year
}
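# Illustrative sketch (not part of the original constants): one way a time
# spec such as "2w" could be resolved to seconds using TIME_SPEC_LOOKUP,
# assuming specs are a positive integer followed by a single unit letter.
def _time_spec_to_seconds(spec):
    value, unit = int(spec[:-1]), spec[-1]
    return value * TIME_SPEC_LOOKUP[unit]
# e.g. _time_spec_to_seconds("2w") == 1209600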
SYNC_MIN_INTERVAL = 300 # 5 minutes
# this is used to set a timeout for xmlrpc
SOCKET_TIMEOUT = 30 # 30 seconds
plat = sys.platform
if plat.startswith("freebsd"):
# this has no effect if BLOCK_SERVICE is empty
BSD_STYLE = " : deny"
else:
BSD_STYLE = ""
|
gpl-2.0
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/base_types/template.py
|
1
|
3160
|
import bpy
from bpy.props import *
from mathutils import Vector
from .. tree_info import getNodeByIdentifier
from .. nodes.system import subprogram_sockets
from .. utils.nodes import newNodeAtCursor, invokeTranslation
class Template:
bl_options = {"INTERNAL"}
nodeOffset = (0, 0)
menuWidth = 400
usedMenu = BoolProperty(default = False)
@classmethod
def poll(cls, context):
try: return context.space_data.node_tree.bl_idname == "an_AnimationNodeTree"
except: return False
def invoke(self, context, event):
if hasattr(self, "drawDialog"):
return context.window_manager.invoke_props_dialog(self, width = self.menuWidth)
if hasattr(self, "drawMenu") and getattr(self, "needsMenu", False):
self.usedMenu = True
context.window_manager.popup_menu(self.drawPopupMenu)
return {"FINISHED"}
self.usedMenu = False
return self.execute(context)
def draw(self, context):
self.drawDialog(self.layout)
def drawPopupMenu(self, menu, context):
col = menu.layout.column()
self.drawMenu(col)
def check(self, context):
return True
def execute(self, context):
self.nodesToMove = []
self.nodesToOffset = []
self.finalActiveNode = None
self.insert()
self.offsetNodesToMouse()
self.moveInsertedNodes()
if self.finalActiveNode is not None:
self._setActiveNode(self.finalActiveNode)
return {"FINISHED"}
def insert(self):
pass
def newNode(self, type, x = 0, y = 0, move = True, mouseOffset = True, label = ""):
node = self.nodeTree.nodes.new(type = type)
node.location = (x, y)
node.label = label
if mouseOffset: self.nodesToOffset.append(node)
if move: self.nodesToMove.append(node)
return node
def newLink(self, fromSocket, toSocket):
self.nodeTree.links.new(toSocket, fromSocket)
def nodeByIdentifier(self, identifier):
try: return getNodeByIdentifier(identifier)
except: return None
def offsetNodesToMouse(self):
tempNode = newNodeAtCursor("an_DebugNode")
offset = tempNode.location
self.nodeTree.nodes.remove(tempNode)
for node in self.nodesToOffset:
node.location += offset + Vector(self.nodeOffset)
def moveInsertedNodes(self):
self.deselectAllNodes()
for node in self.nodesToMove:
node.select = True
invokeTranslation()
@property
def nodeTree(self):
return bpy.context.space_data.edit_tree
@property
def activeNode(self):
return getattr(bpy.context, "active_node", None)
def deselectAllNodes(self):
for node in self.nodeTree.nodes:
node.select = False
def updateSubprograms(self):
subprogram_sockets.updateIfNecessary()
def setActiveNode(self, node):
self.finalActiveNode = node
def _setActiveNode(self, node):
self.deselectAllNodes()
self.finalActiveNode.select = True
self.nodeTree.nodes.active = self.finalActiveNode
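# Illustrative sketch (not part of the original file): a minimal operator
# built on this Template mixin. The node type "an_DebugNode" is borrowed
# from offsetNodesToMouse() above; the idname and label are assumptions.
class ExampleTemplateOperator(bpy.types.Operator, Template):
    bl_idname = "an.example_template"
    bl_label = "Example Template"

    def insert(self):
        node = self.newNode("an_DebugNode", label = "Example")
        self.setActiveNode(node)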
|
gpl-3.0
|
grimlor/OrlandoPythonUserGroup
|
multiprocess_demo.py
|
1
|
3541
|
__filename__ = 'multiprocess_demo.py'
__author__ = '[email protected]'
import multiprocessing
import time
import random
class HelloWorld(object):
def __init__(self):
self.my_number = 1
self.my_number_2 = multiprocessing.Value('i', 1)
#self.lock = threading.Lock()
self.lock = multiprocessing.Lock()
def thread_target1(self, parameter = None):
if parameter:
print '{0} this parameter has been passed'.format(str(parameter))
try:
time.sleep(parameter)
print 'Wake up time'
except:
#who cares
pass
else:
print 'hello world.... this is stupid'
time.sleep(10)
return 'More stupid stuff'
def thread_target2(self, parameter = None):
time.sleep(.1*random.randint(0,10))
self.my_number += 1
time.sleep(float(parameter))
self.my_number += 1
print self.my_number
def thread_target3(self, parameter = None):
time.sleep(.1*random.randint(0,10))
with self.my_number_2.get_lock():
self.my_number_2.value += 1
time.sleep(float(parameter))
with self.my_number_2.get_lock():
self.my_number_2.value += 1
print self.my_number_2.value
def demo1(self):
for i in range(10):
#this_thread = threading.Thread(target = self.thread_target1, args = (i,)).start()
this_thread = multiprocessing.Process(target = self.thread_target1).start()
#print 'Thread count: {0}'.format(threading.active_count())
print 'Process count: {0}'.format(multiprocessing.active_children())
            # Note: Process.start() returns None, so this prints None
print this_thread
def demo2(self):
for i in range(10):
#this_thread = threading.Thread(target = self.thread_target1, args = (i,)).start()
this_process = multiprocessing.Process(target = self.thread_target1, args = (i,))
this_process.daemon = True
this_process.start()
#print 'Thread count: {0}'.format(threading.active_count())
print 'Process count: {0}'.format(multiprocessing.active_children())
time.sleep(60)
def demo3(self):
for i in range(10):
#this_thread = threading.Thread(target = self.thread_target1, args = (i,)).start()
this_process = multiprocessing.Process(target = self.thread_target2, args = (i,))
this_process.daemon = False
this_process.start()
#print 'Thread count: {0}'.format(threading.active_count())
print 'Process count: {0} My Number: {1}'.format(multiprocessing.active_children(), self.my_number)
#print 'Thread count: {0} My Number: {1}'.format(threading.active_count(), self.my_number)
def demo4(self):
for i in range(10):
#this_thread = threading.Thread(target = self.thread_target1, args = (i,)).start()
this_process = multiprocessing.Process(target = self.thread_target3, args = (i,))
this_process.daemon = False
this_process.start()
#print 'Thread count: {0}'.format(threading.active_count())
print 'Process count: {0} My Number {1}'.format(multiprocessing.active_children(), self.my_number)
#print 'Thread count: {0} My Number: {1}'.format(threading.active_count(), self.my_number)
test = HelloWorld()
#test.demo1()
#test.demo2()
#test.demo3()
test.demo4()
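# Expected observation (sketch): in demo3 each child process receives its
# own copy of self.my_number, so the increments never accumulate across
# processes; in demo4 the shared multiprocessing.Value (guarded by its
# lock) is what makes the printed counter actually grow.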
|
apache-2.0
|
dorant/home-assistant
|
tests/components/test_device_sun_light_trigger.py
|
16
|
3719
|
"""
tests.test_component_device_sun_light_trigger
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests device sun light trigger component.
"""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import homeassistant.loader as loader
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, sun, device_sun_light_trigger)
from tests.common import (
get_test_config_dir, get_test_home_assistant, ensure_sun_risen,
ensure_sun_set)
KNOWN_DEV_PATH = None
def setUpModule(): # pylint: disable=invalid-name
    """ Initializes a Home Assistant server. """
global KNOWN_DEV_PATH
KNOWN_DEV_PATH = os.path.join(get_test_config_dir(),
device_tracker.CSV_DEVICES)
with open(KNOWN_DEV_PATH, 'w') as fil:
fil.write('device,name,track,picture\n')
fil.write('DEV1,device 1,1,http://example.com/dev1.jpg\n')
fil.write('DEV2,device 2,1,http://example.com/dev2.jpg\n')
def tearDownModule(): # pylint: disable=invalid-name
""" Stops the Home Assistant server. """
os.remove(os.path.join(get_test_config_dir(),
device_tracker.YAML_DEVICES))
class TestDeviceSunLightTrigger(unittest.TestCase):
""" Test the device sun light trigger module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant()
self.scanner = loader.get_component(
'device_tracker.test').get_scanner(None, None)
self.scanner.reset()
self.scanner.come_home('DEV1')
loader.get_component('light.test').init()
self.assertTrue(device_tracker.setup(self.hass, {
device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
}))
self.assertTrue(light.setup(self.hass, {
light.DOMAIN: {CONF_PLATFORM: 'test'}
}))
self.assertTrue(sun.setup(
self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}))
    def tearDown(self): # pylint: disable=invalid-name
        """ Stop everything that was started. """
self.hass.stop()
def test_lights_on_when_sun_sets(self):
""" Test lights go on when there is someone home and the sun sets. """
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
ensure_sun_risen(self.hass)
light.turn_off(self.hass)
self.hass.pool.block_till_done()
ensure_sun_set(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(light.is_on(self.hass))
def test_lights_turn_off_when_everyone_leaves(self):
""" Test lights turn off when everyone leaves the house. """
light.turn_on(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
self.hass.states.set(device_tracker.ENTITY_ID_ALL_DEVICES,
STATE_NOT_HOME)
self.hass.pool.block_till_done()
self.assertFalse(light.is_on(self.hass))
def test_lights_turn_on_when_coming_home_after_sun_set(self):
""" Test lights turn on when coming home after sun set. """
light.turn_off(self.hass)
ensure_sun_set(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
self.hass.states.set(
device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
self.hass.pool.block_till_done()
self.assertTrue(light.is_on(self.hass))
|
mit
|
bourreauEric/or-tools
|
data/nonogram_regular/nonogram_car.py
|
74
|
1201
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Problem from ECLiPSe
# http:#eclipse.crosscoreop.com/eclipse/examples/nono.ecl.txt
# Problem n3 ( http:#www.pro.or.jp/~fuji/java/puzzle/nonogram/index-eng.html )
# 'Car'
#
rows = 10;
row_rule_len = 4;
row_rules = [
[0,0,0,4],
[0,1,1,6],
[0,1,1,6],
[0,1,1,6],
[0,0,4,9],
[0,0,1,1],
[0,0,1,1],
[0,2,7,2],
[1,1,1,1],
[0,0,2,2]
]
cols = 15;
col_rule_len = 2;
col_rules = [
[0,4],
[1,2],
[1,1],
[5,1],
[1,2],
[1,1],
[5,1],
[1,1],
[4,1],
[4,1],
[4,2],
[4,1],
[4,1],
[4,2],
[0,4]
]
|
apache-2.0
|
ArcherSys/ArcherSys
|
Lib/site-packages/twisted/python/util.py
|
2
|
27425
|
# -*- test-case-name: twisted.python.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import, print_function
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from twisted.python.compat import _PY3, unicode
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# For backwards compatibility, some things import this, so just link it
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.util",
"OrderedDict")
class InsensitiveDict:
    """Dictionary that has case-insensitive keys.
    Normally keys are retained in their original form when queried with
    .keys() or .items(). If initialized with preserve=0, keys are both
    looked up in lowercase and returned in lowercase by .keys() and .items().
"""
"""
Modified recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
contributed by Sami Hangaslammi.
"""
def __init__(self, dict=None, preserve=1):
"""Create an empty dictionary, or update from 'dict'."""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, bytes) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = self._lowerOrReturn(key)
return self.data[k][1]
    def __setitem__(self, key, value):
        """Associate 'value' with 'key'. If 'key' already exists, but
        in a different case, it will be replaced."""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""Case insensitive test whether 'key' exists."""
k = self._lowerOrReturn(key)
return k in self.data
__contains__ = has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, bytes)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""List of keys in their original case."""
return list(self.iterkeys())
def values(self):
"""List of values."""
return list(self.itervalues())
def items(self):
"""List of (key,value) pairs."""
return list(self.iteritems())
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
    def setdefault(self, key, default):
        """If 'key' doesn't exist, associate it with the 'default' value.
        Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.values():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.values():
yield v[1]
def iteritems(self):
for (k, v) in self.data.values():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
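# Example (illustrative) of the case-insensitive behaviour:
#
#     >>> d = InsensitiveDict()
#     >>> d['Content-Type'] = 'text/plain'
#     >>> d['content-type']
#     'text/plain'
#     >>> d.keys()
#     ['Content-Type']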
def uniquify(lst):
"""Make the elements of a list unique by inserting them into a dictionary.
This must not change the order of the input lst.
"""
dct = {}
result = []
for k in lst:
if k not in dct:
result.append(k)
dct[k] = 1
return result
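# Example (illustrative): uniquify([3, 1, 3, 2, 1]) == [3, 1, 2]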
def padTo(n, seq, default=None):
"""
Pads a sequence out to n elements,
filling in with a default value if it is not long enough.
If the input sequence is longer than n, raises ValueError.
Details, details:
This returns a new list; it does not extend the original sequence.
The new list contains the values of the original sequence, not copies.
"""
if len(seq) > n:
raise ValueError("%d elements is more than %d." % (len(seq), n))
blank = [default] * n
blank[:len(seq)] = list(seq)
return blank
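# Example (illustrative): padTo(4, [1, 2]) == [1, 2, None, None]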
def getPluginDirs():
warnings.warn(
"twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
import twisted
systemPlugins = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(twisted.__file__))), 'plugins')
userPlugins = os.path.expanduser("~/TwistedPlugins")
confPlugins = os.path.expanduser("~/.twisted")
allPlugins = filter(os.path.isdir, [systemPlugins, userPlugins, confPlugins])
return allPlugins
def addPluginDir():
warnings.warn(
"twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
def _getpass(prompt):
"""
Helper to turn IOErrors into KeyboardInterrupts.
"""
import getpass
try:
return getpass.getpass(prompt)
except IOError as e:
if e.errno == errno.EINTR:
raise KeyboardInterrupt
raise
except EOFError:
raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
confirmPrompt = 'Confirm password: ',
mismatchMessage = "Passwords don't match."):
"""Obtain a password by prompting or from stdin.
If stdin is a terminal, prompt for a new password, and confirm (if
C{confirm} is true) by asking again to make sure the user typed the same
thing, as keystrokes will not be echoed.
If stdin is not a terminal, and C{forceTTY} is not true, read in a line
and use it as the password, less the trailing newline, if any. If
C{forceTTY} is true, attempt to open a tty and prompt for the password
using it. Raise a RuntimeError if this is not possible.
@returns: C{str}
"""
isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
old = None
try:
if not isaTTY:
if forceTTY:
try:
old = sys.stdin, sys.stdout
sys.stdin = sys.stdout = open('/dev/tty', 'r+')
except:
raise RuntimeError("Cannot obtain a TTY")
else:
password = sys.stdin.readline()
if password[-1] == '\n':
password = password[:-1]
return password
while 1:
try1 = _getpass(prompt)
if not confirm:
return try1
try2 = _getpass(confirmPrompt)
if try1 == try2:
return try1
else:
sys.stderr.write(mismatchMessage + "\n")
finally:
if old:
sys.stdin.close()
sys.stdin, sys.stdout = old
def println(*a):
sys.stdout.write(' '.join(map(str, a))+'\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
return ''.join([chr(ord(c) ^ b) for c in s])
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
"""
Creates a function that will return a string representing a progress bar.
"""
aValue = width / float(maxPosition)
def statBar(position, force = 0, last = ['']):
assert len(last) == 1, "Don't mess with the last parameter."
done = int(aValue * position)
toDo = width - done - 2
result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
if force:
last[0] = result
return result
if result == last[0]:
return ''
last[0] = result
return result
statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
returned string is %d characters long, and the range goes from 0..%d.
The 'position' argument is where the '%s' will be drawn. If force is false,
'' will be returned instead if the resulting progress bar is identical to the
previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
return statBar
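# Example (illustrative) of building and drawing a ten-character bar:
#
#     >>> bar = makeStatBar(10, 100)
#     >>> bar(50, force=1)
#     '[=====>---]'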
def spewer(frame, s, ignored):
"""
A trace function for sys.settrace that prints every function or method call.
"""
from twisted.python import reflect
if 'self' in frame.f_locals:
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print('method %s of %s at %s' % (
frame.f_code.co_name, k, id(se)))
else:
print('function %s in %s, line %s' % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
def searchupwards(start, files=[], dirs=[]):
"""
Walk upwards from start, looking for a directory containing
all files and directories given as arguments::
>>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
If not found, return None
"""
start=os.path.abspath(start)
parents=start.split(os.sep)
exists=os.path.exists; join=os.sep.join; isdir=os.path.isdir
while len(parents):
candidate=join(parents)+os.sep
allpresent=1
for f in files:
if not exists("%s%s" % (candidate, f)):
allpresent=0
break
if allpresent:
for d in dirs:
if not isdir("%s%s" % (candidate, d)):
allpresent=0
break
if allpresent: return candidate
parents.pop(-1)
return None
class LineLog:
"""
A limited-size line-based log, useful for logging line-based
protocols such as SMTP.
When the log fills up, old entries drop off the end.
"""
def __init__(self, size=10):
"""
Create a new log, with size lines of storage (default 10).
A log size of 0 (or less) means an infinite log.
"""
if size < 0:
size = 0
self.log = [None]*size
self.size = size
def append(self,line):
if self.size:
self.log[:-1] = self.log[1:]
self.log[-1] = line
else:
self.log.append(line)
def str(self):
return '\n'.join(filter(None,self.log))
def __getitem__(self, item):
return filter(None,self.log)[item]
def clear(self):
"""Empty the log"""
self.log = [None]*self.size
def raises(exception, f, *args, **kwargs):
"""
Determine whether the given call raises the given exception.
"""
try:
f(*args, **kwargs)
except exception:
return 1
return 0
class IntervalDifferential(object):
"""
Given a list of intervals, generate the amount of time to sleep between
"instants".
For example, given 7, 11 and 13, the three (infinite) sequences::
7 14 21 28 35 ...
11 22 33 44 ...
13 26 39 52 ...
will be generated, merged, and used to produce::
(7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)
New intervals may be added or removed as iteration proceeds using the
proper methods.
"""
def __init__(self, intervals, default=60):
"""
@type intervals: C{list} of C{int}, C{long}, or C{float} param
@param intervals: The intervals between instants.
@type default: C{int}, C{long}, or C{float}
@param default: The duration to generate if the intervals list
becomes empty.
"""
self.intervals = intervals[:]
self.default = default
def __iter__(self):
return _IntervalDifferentialIterator(self.intervals, self.default)
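# Example (illustrative), matching the docstring above:
#
#     >>> it = iter(IntervalDifferential([7, 11, 13]))
#     >>> [next(it) for _ in range(4)]
#     [(7, 0), (4, 1), (2, 2), (1, 0)]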
class _IntervalDifferentialIterator(object):
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def __next__(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
# Iterators on Python 2 use next(), not __next__()
next = __next__
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
"""
Mixin providing a flexible implementation of C{__str__}.
C{__str__} output will begin with the name of the class, or the contents
of the attribute C{fancybasename} if it is set.
The body of C{__str__} can be controlled by overriding C{showAttributes} in
a subclass. Set C{showAttributes} to a sequence of strings naming
attributes, or sequences of C{(attributeName, callable)}, or sequences of
C{(attributeName, displayName, formatCharacter)}. In the second case, the
callable is passed the value of the attribute and its return value used in
the output of C{__str__}. In the final case, the attribute is looked up
using C{attributeName}, but the output uses C{displayName} instead, and
renders the value of the attribute using C{formatCharacter}, e.g. C{"%.3f"}
might be used for a float.
"""
# Override in subclasses:
showAttributes = ()
def __str__(self):
r = ['<', (hasattr(self, 'fancybasename') and self.fancybasename)
or self.__class__.__name__]
for attr in self.showAttributes:
if isinstance(attr, str):
r.append(' %s=%r' % (attr, getattr(self, attr)))
elif len(attr) == 2:
r.append((' %s=' % (attr[0],)) + attr[1](getattr(self, attr[0])))
else:
r.append((' %s=' + attr[2]) % (attr[1], getattr(self, attr[0])))
r.append('>')
return ''.join(r)
__repr__ = __str__
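# Example (illustrative) showing the three showAttributes forms:
#
#     >>> class Point(FancyStrMixin):
#     ...     showAttributes = ('x', ('y', repr), ('z', 'depth', '%.1f'))
#     ...     x, y, z = 1, 2, 3.0
#     >>> str(Point())
#     '<Point x=1 y=2 depth=3.0>'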
class FancyEqMixin:
"""
Mixin that implements C{__eq__} and C{__ne__}.
Comparison is done using the list of attributes defined in
C{compareAttributes}.
"""
compareAttributes = ()
def __eq__(self, other):
if not self.compareAttributes:
return self is other
if isinstance(self, other.__class__):
return (
[getattr(self, name) for name in self.compareAttributes] ==
[getattr(other, name) for name in self.compareAttributes])
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
try:
# initgroups is available in Python 2.7+ on UNIX-likes
from os import initgroups as _initgroups
except ImportError:
_initgroups = None
if _initgroups is None:
def initgroups(uid, primaryGid):
"""
Do nothing.
        The underlying platform support required to manipulate groups is missing.
"""
else:
def initgroups(uid, primaryGid):
"""
Initializes the group access list.
This uses the stdlib support which calls initgroups(3) under the hood.
If the given user is a member of more than C{NGROUPS}, arbitrary
groups will be silently discarded to bring the number below that
limit.
@type uid: C{int}
@param uid: The UID for which to look up group information.
@type primaryGid: C{int} or C{NoneType}
@param primaryGid: If provided, an additional GID to include when
setting the groups.
"""
return _initgroups(pwd.getpwuid(uid)[0], primaryGid)
def switchUID(uid, gid, euid=False):
"""
Attempts to switch the uid/euid and gid/egid for the current process.
If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
this function will issue a L{UserWarning} and not raise an exception.
@type uid: C{int} or C{NoneType}
@param uid: the UID (or EUID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type gid: C{int} or C{NoneType}
@param gid: the GID (or EGID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type euid: C{bool}
@param euid: if True, set only effective user-id rather than real user-id.
(This option has no effect unless the process is running
as root, in which case it means not to shed all
privileges, retaining the option to regain privileges
in cases such as spawning processes. Use with caution.)
"""
if euid:
setuid = os.seteuid
setgid = os.setegid
getuid = os.geteuid
else:
setuid = os.setuid
setgid = os.setgid
getuid = os.getuid
if gid is not None:
setgid(gid)
if uid is not None:
if uid == getuid():
uidText = (euid and "euid" or "uid")
actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
problemText = "%s is already %s" % (uidText, getuid())
warnings.warn("%s but %s; should we be root? Continuing."
% (actionText, problemText))
else:
initgroups(uid, gid)
setuid(uid)
class SubclassableCStringIO(object):
"""
A wrapper around cStringIO to allow for subclassing.
"""
__csio = None
def __init__(self, *a, **kw):
from cStringIO import StringIO
self.__csio = StringIO(*a, **kw)
def __iter__(self):
return self.__csio.__iter__()
def next(self):
return self.__csio.next()
def close(self):
return self.__csio.close()
def isatty(self):
return self.__csio.isatty()
def seek(self, pos, mode=0):
return self.__csio.seek(pos, mode)
def tell(self):
return self.__csio.tell()
def read(self, n=-1):
return self.__csio.read(n)
def readline(self, length=None):
return self.__csio.readline(length)
def readlines(self, sizehint=0):
return self.__csio.readlines(sizehint)
def truncate(self, size=None):
return self.__csio.truncate(size)
def write(self, s):
return self.__csio.write(s)
def writelines(self, list):
return self.__csio.writelines(list)
def flush(self):
return self.__csio.flush()
def getvalue(self):
return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
"""
Call C{f} with the given arguments, handling C{EINTR} by retrying.
@param f: A function to call.
@param *a: Positional arguments to pass to C{f}.
@param **kw: Keyword arguments to pass to C{f}.
@return: Whatever C{f} returns.
@raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
C{errno} set to C{EINTR}.
"""
while True:
try:
return f(*a, **kw)
except (IOError, OSError) as e:
if e.args[0] == errno.EINTR:
continue
raise
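# Example (illustrative): retry a system call interrupted by signals until
# it completes, e.g. untilConcludes(os.read, fd, 4096) for some open fd.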
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
pass
try:
g.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
g.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
try:
g.__module__ = f.__module__
except TypeError:
pass
return g
def nameToLabel(mname):
"""
Convert a string like a variable name into a slightly more human-friendly
string with spaces and capitalized letters.
@type mname: C{str}
@param mname: The name to convert to a label. This must be a string
which could be used as a Python identifier. Strings which do not take
this form will result in unpredictable behavior.
@rtype: C{str}
"""
labelList = []
word = ''
lastWasUpper = False
for letter in mname:
if letter.isupper() == lastWasUpper:
# Continuing a word.
word += letter
else:
# breaking a word OR beginning a word
if lastWasUpper:
# could be either
if len(word) == 1:
# keep going
word += letter
else:
# acronym
# we're processing the lowercase letter after the acronym-then-capital
lastWord = word[:-1]
firstLetter = word[-1]
labelList.append(lastWord)
word = firstLetter + letter
else:
# definitely breaking: lower to upper
labelList.append(word)
word = letter
lastWasUpper = letter.isupper()
if labelList:
labelList[0] = labelList[0].capitalize()
else:
return mname.capitalize()
labelList.append(word)
return ' '.join(labelList)
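# Examples (illustrative):
#     nameToLabel('maxRetries') == 'Max Retries'
#     nameToLabel('aBigName') == 'A Big Name'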
def uidFromString(uidString):
"""
Convert a user identifier, as a string, into an integer UID.
    @type uidString: C{str}
    @param uidString: A string giving the base-ten representation of a UID or the
name of a user which can be converted to a UID via L{pwd.getpwnam}.
@rtype: C{int}
@return: The integer UID corresponding to the given string.
@raise ValueError: If the user name is supplied and L{pwd} is not
available.
"""
try:
return int(uidString)
except ValueError:
if pwd is None:
raise
return pwd.getpwnam(uidString)[2]
def gidFromString(gidString):
"""
Convert a group identifier, as a string, into an integer GID.
    @type gidString: C{str}
    @param gidString: A string giving the base-ten representation of a GID or the
name of a group which can be converted to a GID via L{grp.getgrnam}.
@rtype: C{int}
@return: The integer GID corresponding to the given string.
@raise ValueError: If the group name is supplied and L{grp} is not
available.
"""
try:
return int(gidString)
except ValueError:
if grp is None:
raise
return grp.getgrnam(gidString)[2]
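# Examples (illustrative): uidFromString("1000") == 1000, while
# uidFromString("root") is resolved via pwd and yields 0 on most systems.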
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
"""
Run the given function wrapped with seteuid/setegid calls.
This will try to minimize the number of seteuid/setegid calls, comparing
current and wanted permissions
@param euid: effective UID used to call the function.
@type euid: C{int}
    @type egid: C{int}
    @param egid: effective GID used to call the function.
@param function: the function run with the specific permission.
@type function: any callable
@param *args: arguments passed to C{function}
@param **kwargs: keyword arguments passed to C{function}
"""
uid, gid = os.geteuid(), os.getegid()
if uid == euid and gid == egid:
return function(*args, **kwargs)
else:
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(egid)
if euid != 0 and (euid != uid or gid != egid):
os.seteuid(euid)
try:
return function(*args, **kwargs)
finally:
if euid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(gid)
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
"""
Run C{f(*args, **kwargs)}, but with some warnings suppressed.
Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
special support for L{twisted.internet.defer.Deferred}.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable.
@param args: Arguments for C{f}.
@param kwargs: Keyword arguments for C{f}
@return: The result of C{f(*args, **kwargs)}.
"""
with warnings.catch_warnings():
for a, kw in suppressedWarnings:
warnings.filterwarnings(*a, **kw)
return f(*args, **kwargs)
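# Example (illustrative): run a callable with DeprecationWarning silenced,
# where someDeprecatedFunction stands in for any callable.
#
#     runWithWarningsSuppressed(
#         [(("ignore",), dict(category=DeprecationWarning))],
#         someDeprecatedFunction)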
__all__ = [
"uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
"getPassword", "println", "makeStatBar", "OrderedDict",
"InsensitiveDict", "spewer", "searchupwards", "LineLog",
"raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
"switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
"nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
"untilConcludes", "runWithWarningsSuppressed",
]
if _PY3:
__notported__ = ["SubclassableCStringIO", "LineLog", "makeStatBar"]
for name in __all__[:]:
if name in __notported__:
__all__.remove(name)
del globals()[name]
del name, __notported__
|
mit
|
Tao4free/QGIS_plugins
|
SuperLabeling/ui.py
|
2
|
5076
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SuperLabeling.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(477, 195)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.lbLayer = QtGui.QLabel(Dialog)
self.lbLayer.setObjectName(_fromUtf8("lbLayer"))
self.gridLayout.addWidget(self.lbLayer, 0, 0, 1, 1)
self.txLayer = QtGui.QLineEdit(Dialog)
self.txLayer.setObjectName(_fromUtf8("txLayer"))
self.gridLayout.addWidget(self.txLayer, 0, 1, 1, 5)
self.lbStep1 = QtGui.QLabel(Dialog)
self.lbStep1.setObjectName(_fromUtf8("lbStep1"))
self.gridLayout.addWidget(self.lbStep1, 1, 0, 1, 6)
self.cbField = QtGui.QComboBox(Dialog)
self.cbField.setObjectName(_fromUtf8("cbField"))
self.gridLayout.addWidget(self.cbField, 2, 0, 1, 2)
self.lbStep2 = QtGui.QLabel(Dialog)
self.lbStep2.setObjectName(_fromUtf8("lbStep2"))
self.gridLayout.addWidget(self.lbStep2, 4, 0, 2, 3)
self.txX = QtGui.QLineEdit(Dialog)
self.txX.setObjectName(_fromUtf8("txX"))
self.gridLayout.addWidget(self.txX, 4, 5, 1, 1)
self.lbPoint = QtGui.QLabel(Dialog)
self.lbPoint.setObjectName(_fromUtf8("lbPoint"))
self.gridLayout.addWidget(self.lbPoint, 2, 5, 1, 1)
self.txY = QtGui.QLineEdit(Dialog)
self.txY.setObjectName(_fromUtf8("txY"))
self.gridLayout.addWidget(self.txY, 5, 5, 1, 1)
self.lbX = QtGui.QLabel(Dialog)
self.lbX.setFrameShape(QtGui.QFrame.NoFrame)
self.lbX.setObjectName(_fromUtf8("lbX"))
self.gridLayout.addWidget(self.lbX, 4, 4, 1, 1)
self.lbY = QtGui.QLabel(Dialog)
self.lbY.setObjectName(_fromUtf8("lbY"))
self.gridLayout.addWidget(self.lbY, 5, 4, 1, 1)
self.pbnEdit = QtGui.QPushButton(Dialog)
self.pbnEdit.setObjectName(_fromUtf8("pbnEdit"))
self.gridLayout.addWidget(self.pbnEdit, 2, 2, 1, 1)
self.lbStep2_2 = QtGui.QLabel(Dialog)
self.lbStep2_2.setObjectName(_fromUtf8("lbStep2_2"))
self.gridLayout.addWidget(self.lbStep2_2, 8, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 8, 4, 1, 2)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "SuperLabeling", None))
self.lbLayer.setText(_translate("Dialog", "<html><head/><body><p>Current layer</p></body></html>", None))
self.lbStep1.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:10pt;\">1. Choose the field you want to Label, click "Start Edition" </span></p></body></html>", None))
self.lbStep2.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">2. Left Click on canvas where you want to put </span></p><p align=\"center\"><span style=\" font-size:10pt;\">the Label (make click near the feature)</span></p></body></html>", None))
self.lbPoint.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">The point you click</span></p></body></html>", None))
self.lbX.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">X</span></p></body></html>", None))
self.lbY.setText(_translate("Dialog", "<html><head/><body><p align=\"center\">Y</p></body></html>", None))
self.pbnEdit.setText(_translate("Dialog", "Start Edition", None))
self.lbStep2_2.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">3. Left double click to hide label</span></p></body></html>", None))
|
gpl-3.0
|
tersmitten/ansible
|
lib/ansible/modules/network/meraki/meraki_admin.py
|
4
|
16712
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_admin
short_description: Manage administrators in the Meraki cloud
version_added: '2.6'
description:
- Allows for creation, management, and visibility into administrators within Meraki.
options:
name:
description:
- Name of the dashboard administrator.
- Required when creating a new administrator.
type: str
email:
description:
- Email address for the dashboard administrator.
- Email cannot be updated.
- Required when creating or editing an administrator.
type: str
org_access:
description:
- Privileges assigned to the administrator in the organization.
aliases: [ orgAccess ]
choices: [ full, none, read-only ]
type: str
tags:
description:
- Tags the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
- If C(none) is specified, C(network) or C(tags) must be specified.
suboptions:
tag:
description:
- Object tag which privileges should be assigned.
type: str
access:
description:
- The privilege of the dashboard administrator for the tag.
type: str
networks:
description:
- List of networks the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
suboptions:
id:
description:
- Network ID for which administrator should have privileges assigned.
type: str
access:
description:
- The privilege of the dashboard administrator on the network.
- Valid options are C(full), C(read-only), or C(none).
type: str
state:
description:
            - Create, modify, or delete an administrator.
- If C(state) is C(absent), name takes priority over email if both are specified.
choices: [ absent, present, query ]
required: true
type: str
org_name:
description:
- Name of organization.
- Used when C(name) should refer to another object.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
aliases: ['organization']
type: str
org_id:
description:
- ID of organization.
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query information about all administrators associated to the organization
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query information about a single administrator by name
meraki_admin:
auth_key: abc12345
org_id: 12345
state: query
name: Jane Doe
- name: Query information about a single administrator by email
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
email: [email protected]
- name: Create new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: [email protected]
- name: Create new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: [email protected]
- name: Create a new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: [email protected]
- name: Revoke access to an organization for an administrator
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: absent
email: [email protected]
- name: Create a new administrator with full access to two tags
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
orgAccess: read-only
email: [email protected]
tags:
- tag: tenant
access: full
- tag: corporate
access: read-only
- name: Create a new administrator with full access to a network
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
orgAccess: read-only
email: [email protected]
networks:
- id: N_12345
access: full
'''
RETURN = r'''
data:
description: List of administrators.
returned: success
type: complex
contains:
email:
description: Email address of administrator.
returned: success
type: str
sample: [email protected]
id:
description: Unique identification number of administrator.
returned: success
type: str
sample: 1234567890
name:
description: Given name of administrator.
returned: success
type: str
sample: John Doe
accountStatus:
description: Status of account.
returned: success
type: str
sample: ok
twoFactorAuthEnabled:
description: Enabled state of two-factor authentication for administrator.
returned: success
type: bool
sample: false
hasApiKey:
description: Defines whether administrator has an API assigned to their account.
returned: success
type: bool
sample: false
lastActive:
            description: Date and time the administrator was last active within Dashboard.
returned: success
type: str
sample: 2019-01-28 14:58:56 -0800
networks:
description: List of networks administrator has access on.
returned: success
type: complex
contains:
id:
description: The network ID.
returned: when network permissions are set
type: str
sample: N_0123456789
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when network permissions are set
type: str
sample: read-only
tags:
            description: Tags the administrator has access on.
returned: success
type: complex
contains:
tag:
description: Tag name.
returned: when tag permissions are set
type: str
sample: production
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when tag permissions are set
type: str
sample: full
orgAccess:
description: The privilege of the dashboard administrator on the organization. Options are 'full', 'read-only', or 'none'.
returned: success
type: str
sample: full
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def get_admins(meraki, org_id):
admins = meraki.request(
meraki.construct_path(
'query',
function='admin',
org_id=org_id
),
method='GET'
)
if meraki.status == 200:
return admins
def get_admin_id(meraki, data, name=None, email=None):
admin_id = None
for a in data:
if meraki.params['name'] is not None:
if meraki.params['name'] == a['name']:
if admin_id is not None:
meraki.fail_json(msg='There are multiple administrators with the same name')
else:
admin_id = a['id']
elif meraki.params['email']:
if meraki.params['email'] == a['email']:
return a['id']
if admin_id is None:
meraki.fail_json(msg='No admin_id found')
return admin_id
def get_admin(meraki, data, id):
for a in data:
if a['id'] == id:
return a
meraki.fail_json(msg='No admin found by specified name or email')
def find_admin(meraki, data, email):
for a in data:
if a['email'] == email:
return a
return None
def delete_admin(meraki, org_id, admin_id):
path = meraki.construct_path('revoke', 'admin', org_id=org_id) + admin_id
r = meraki.request(path,
method='DELETE'
)
if meraki.status == 204:
return r
def network_factory(meraki, networks, nets):
networks = json.loads(networks)
networks_new = []
for n in networks:
networks_new.append({'id': meraki.get_net_id(org_name=meraki.params['org_name'],
net_name=n['network'],
data=nets),
'access': n['access']
})
return networks_new
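# Note (illustrative): the networks JSON handed to network_factory() is
# expected to be a list keyed by network *name*, for example
# '[{"network": "YourNet", "access": "full"}]'; each name is translated
# to its network id via meraki.get_net_id().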
def create_admin(meraki, org_id, name, email):
payload = dict()
payload['name'] = name
payload['email'] = email
is_admin_existing = find_admin(meraki, get_admins(meraki, org_id), email)
if meraki.params['org_access'] is not None:
payload['orgAccess'] = meraki.params['org_access']
if meraki.params['tags'] is not None:
payload['tags'] = json.loads(meraki.params['tags'])
if meraki.params['networks'] is not None:
nets = meraki.get_nets(org_id=org_id)
networks = network_factory(meraki, meraki.params['networks'], nets)
# meraki.fail_json(msg=str(type(networks)), data=networks)
payload['networks'] = networks
if is_admin_existing is None: # Create new admin
path = meraki.construct_path('create', function='admin', org_id=org_id)
r = meraki.request(path,
method='POST',
payload=json.dumps(payload)
)
if meraki.status == 201:
meraki.result['changed'] = True
return r
elif is_admin_existing is not None: # Update existing admin
if not meraki.params['tags']:
payload['tags'] = []
if not meraki.params['networks']:
payload['networks'] = []
if meraki.is_update_required(is_admin_existing, payload) is True:
path = meraki.construct_path('update', function='admin', org_id=org_id) + is_admin_existing['id']
r = meraki.request(path,
method='PUT',
payload=json.dumps(payload)
)
if meraki.status == 200:
meraki.result['changed'] = True
return r
else:
meraki.result['data'] = is_admin_existing
return -1
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['present', 'query', 'absent'], required=True),
name=dict(type='str'),
email=dict(type='str'),
org_access=dict(type='str', aliases=['orgAccess'], choices=['full', 'read-only', 'none']),
tags=dict(type='json'),
networks=dict(type='json'),
org_name=dict(type='str', aliases=['organization']),
org_id=dict(type='str'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='admin')
meraki.function = 'admin'
meraki.params['follow_redirects'] = 'all'
query_urls = {'admin': '/organizations/{org_id}/admins',
}
create_urls = {'admin': '/organizations/{org_id}/admins',
}
update_urls = {'admin': '/organizations/{org_id}/admins/',
}
revoke_urls = {'admin': '/organizations/{org_id}/admins/',
}
meraki.url_catalog['query'] = query_urls
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['revoke'] = revoke_urls
try:
meraki.params['auth_key'] = os.environ['MERAKI_KEY']
except KeyError:
pass
if meraki.params['auth_key'] is None:
module.fail_json(msg='Meraki Dashboard API key not set')
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return result
# execute checks for argument completeness
if meraki.params['state'] == 'query':
        meraki.mutually_exclusive = ['name', 'email']
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id required')
meraki.required_if = [(['state'], ['absent'], ['email']),
]
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not meraki.params['org_id']:
org_id = meraki.get_org_id(meraki.params['org_name'])
if meraki.params['state'] == 'query':
admins = get_admins(meraki, org_id)
if not meraki.params['name'] and not meraki.params['email']: # Return all admins for org
meraki.result['data'] = admins
if meraki.params['name'] is not None: # Return a single admin for org
admin_id = get_admin_id(meraki, admins, name=meraki.params['name'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['email'] is not None:
admin_id = get_admin_id(meraki, admins, email=meraki.params['email'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['state'] == 'present':
r = create_admin(meraki,
org_id,
meraki.params['name'],
meraki.params['email'],
)
if r != -1:
meraki.result['data'] = r
elif meraki.params['state'] == 'absent':
admin_id = get_admin_id(meraki,
get_admins(meraki, org_id),
email=meraki.params['email']
)
r = delete_admin(meraki, org_id, admin_id)
if r != -1:
meraki.result['data'] = r
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
geometalab/G4SE-Compass
|
compass-api/G4SE/api/views.py
|
1
|
4673
|
import logging
from collections import OrderedDict
import django_filters
from drf_haystack.filters import HaystackHighlightFilter
from drf_haystack.viewsets import HaystackViewSet
from haystack.query import SearchQuerySet
from rest_framework import filters
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework.decorators import api_view, renderer_classes, list_route
from rest_framework import response, schemas
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer
from api.filters import LimitRecordFilter, DateLimitRecordFilter, DateLimitSearchRecordFilter, \
IsLatestSearchRecordFilter, MetadataSearchFilter, MetadataSearchOrderingFilter
from api.helpers.helpers import is_internal
from api.helpers.input import ElasticSearchExtendedAutoQuery
from api.models import GeoServiceMetadata
from api.paginators import MetadataResultsSetPagination, StandardResultsSetPagination
from api.serializers import EditRecordSerializer, GeoServiceMetadataSearchSerializer, \
GeoServiceMetadataSerializer
logger = logging.getLogger(__name__)
@api_view()
@renderer_classes([SwaggerUIRenderer, OpenAPIRenderer])
def schema_view(request):
generator = schemas.SchemaGenerator(title='G4SE API')
return response.Response(generator.get_schema(request=request))
class MetaDataReadOnlyViewSet(viewsets.ReadOnlyModelViewSet):
"""
Returns all metadata records visible to the client.
"""
serializer_class = GeoServiceMetadataSerializer
pagination_class = MetadataResultsSetPagination
queryset = GeoServiceMetadata.objects.all()
ordering_parameter = api_settings.ORDERING_PARAM
ordering = '-modified'
lookup_url_kwarg = 'pk'
lookup_field = 'api_id'
def get_queryset(self):
queryset = super().get_queryset()
internal = self.request.user.is_authenticated or is_internal(self.request.META['REMOTE_ADDR'])
# TODO: make this into an optional list of publicity
if not internal:
queryset = queryset.filter(visibility=GeoServiceMetadata.VISIBILITY_PUBLIC)
return queryset
filter_backends = (
filters.OrderingFilter,
LimitRecordFilter,
DateLimitRecordFilter,
django_filters.rest_framework.DjangoFilterBackend,
)
class GeoServiceMetadataAdminViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAdminUser,)
queryset = GeoServiceMetadata.objects.filter(imported=False)
serializer_class = EditRecordSerializer
pagination_class = StandardResultsSetPagination
class GeoServiceMetadataSearchView(HaystackViewSet):
# `index_models` is an optional list of which models you would like to include
# in the search result. You might have several models indexed, and this provides
# a way to filter out those of no interest for this particular view.
# (Translates to `SearchQuerySet().models(*index_models)` behind the scenes.
FALLBACK_LANGUAGE = GeoServiceMetadata.ENGLISH
pagination_class = MetadataResultsSetPagination
index_models = [
GeoServiceMetadata,
]
document_uid_field = "api_id"
filter_backends = [
HaystackHighlightFilter,
DateLimitSearchRecordFilter,
IsLatestSearchRecordFilter,
django_filters.rest_framework.DjangoFilterBackend,
MetadataSearchFilter,
MetadataSearchOrderingFilter,
]
serializer_class = GeoServiceMetadataSearchSerializer
def get_queryset(self, index_models=[]):
using = self.request.GET.get('language', self.FALLBACK_LANGUAGE)
if using not in ['de', 'fr', 'it']:
using = self.FALLBACK_LANGUAGE
qs = super().get_queryset(index_models).using(using)
internal = self.request.user.is_authenticated or is_internal(self.request.META['REMOTE_ADDR'])
# TODO: make this into an optional list of publicity
if not internal:
qs = qs.filter(visibility=GeoServiceMetadata.VISIBILITY_PUBLIC)
return qs
@list_route()
def actual(self, request):
query_string = request.GET.get('search', '')
if query_string != '':
using = request.GET.get('language', self.FALLBACK_LANGUAGE)
cleaned_query_string = ElasticSearchExtendedAutoQuery(query_string)
searched_for = cleaned_query_string.prepare(SearchQuerySet().using(using).query)
else:
searched_for = ''
return Response(OrderedDict([
('search', query_string),
('actual_search', searched_for),
]))
|
mit
|
kirca/OpenUpgrade
|
addons/crm_partner_assign/wizard/__init__.py
|
389
|
1038
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_forward_to_partner
import crm_channel_interested
|
agpl-3.0
|
digetx/picasso-kernel
|
Documentation/target/tcm_mod_builder.py
|
868
|
40692
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option '%s' is missing\n" % m
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
appleseedhq/gaffer
|
python/GafferOSLTest/ModuleTest.py
|
8
|
2380
|
##########################################################################
#
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

import GafferTest
class ModuleTest( GafferTest.TestCase ) :
def testNamespacePollution( self ) :
import GafferOSL
self.assertRaises( AttributeError, getattr, GafferOSL, "IECore" )
self.assertRaises( AttributeError, getattr, GafferOSL, "Gaffer" )
self.assertRaises( AttributeError, getattr, GafferOSL, "GafferScene" )
self.assertRaises( AttributeError, getattr, GafferOSL, "GafferImage" )
def testDoesNotImportUI( self ) :
self.assertModuleDoesNotImportUI( "GafferOSL" )
self.assertModuleDoesNotImportUI( "GafferOSLTest" )
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
YarivCol/mbed-os
|
tools/targets/lint.py
|
12
|
10093
|
"""A linting utility for targets.json
This linting utility may be called as follows:
python <path-to>/lint.py targets TARGET [TARGET ...]
and the named targets will be linted
"""
# mbed SDK
# Copyright (c) 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
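# Hedged summary of the subcommands defined below: "targets" lints only the
# named MCUs, "all-targets" lints every entry in TARGET_MAP, and "orphans"
# lists targets that appear in no target's resolution order.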
from os.path import join, abspath, dirname
if __name__ == "__main__":
import sys
ROOT = abspath(join(dirname(__file__), "..", ".."))
sys.path.insert(0, ROOT)
from copy import copy
from yaml import dump_all
import argparse
from tools.targets import Target, set_targets_json_location, TARGET_MAP
def must_have_keys(keys, dict):
"""Require keys in an MCU/Board
is a generator for errors
"""
for key in keys:
if key not in dict:
yield "%s not found, and is required" % key
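# Illustrative contract (hypothetical input):
#   list(must_have_keys(["inherits"], {"public": False}))
#   -> ['inherits not found, and is required']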
def may_have_keys(keys, dict):
"""Disallow all other keys in an MCU/Board
is a generator for errors
"""
for key in dict.keys():
if key not in keys:
yield "%s found, and is not allowed" % key
def check_extra_labels(dict):
"""Check that extra_labels does not contain any Target names
is a generator for errors
"""
for label in (dict.get("extra_labels", []) +
dict.get("extra_labels_add", [])):
if label in Target.get_json_target_data():
yield "%s is not allowed in extra_labels" % label
def check_release_version(dict):
"""Verify that release version 5 is combined with support for all toolchains
is a generator for errors
"""
if ("release_versions" in dict and
"5" in dict["release_versions"] and
"supported_toolchains" in dict):
for toolc in ["GCC_ARM", "ARM", "IAR"]:
if toolc not in dict["supported_toolchains"]:
yield ("%s not found in supported_toolchains, and is "
"required by mbed OS 5" % toolc)
def check_inherits(dict):
if ("inherits" in dict and len(dict["inherits"]) > 1):
yield "multiple inheritance is forbidden"
DEVICE_HAS_ALLOWED = ["ANALOGIN", "ANALOGOUT", "CAN", "ETHERNET", "EMAC",
                      "FLASH", "I2C", "I2CSLAVE", "I2C_ASYNCH", "INTERRUPTIN",
                      "LOWPOWERTIMER", "PORTIN", "PORTINOUT", "PORTOUT",
                      "PWMOUT", "RTC", "TRNG", "SERIAL", "SERIAL_ASYNCH",
                      "SERIAL_FC", "SLEEP", "SPI", "SPI_ASYNCH", "SPISLAVE",
                      "STORAGE"]
def check_device_has(dict):
for name in dict.get("device_has", []):
if name not in DEVICE_HAS_ALLOWED:
yield "%s is not allowed in device_has" % name
MCU_REQUIRED_KEYS = ["release_versions", "supported_toolchains",
"default_lib", "public", "inherits", "device_has"]
MCU_ALLOWED_KEYS = ["device_has_add", "device_has_remove", "core",
"extra_labels", "features", "features_add",
"features_remove", "bootloader_supported", "device_name",
"post_binary_hook", "default_toolchain", "config",
"extra_labels_add", "extra_labels_remove",
"target_overrides"] + MCU_REQUIRED_KEYS
def check_mcu(mcu_json, strict=False):
"""Generate a list of problems with an MCU
:param: mcu_json the MCU's dict to check
:param: strict enforce required keys
"""
errors = list(may_have_keys(MCU_ALLOWED_KEYS, mcu_json))
if strict:
errors.extend(must_have_keys(MCU_REQUIRED_KEYS, mcu_json))
errors.extend(check_extra_labels(mcu_json))
errors.extend(check_release_version(mcu_json))
errors.extend(check_inherits(mcu_json))
errors.extend(check_device_has(mcu_json))
if 'public' in mcu_json and mcu_json['public']:
errors.append("public must be false")
return errors
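# Illustrative call (hypothetical MCU dict): check_mcu({"public": True})
# returns ['public must be false'] and nothing else, because "public" is an
# allowed key and strict mode is off.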
BOARD_REQUIRED_KEYS = ["inherits"]
BOARD_ALLOWED_KEYS = ["supported_form_factors", "is_disk_virtual",
"detect_code", "extra_labels", "extra_labels_add",
"extra_labels_remove", "public", "config",
"forced_reset_timeout", "target_overrides"] + BOARD_REQUIRED_KEYS
def check_board(board_json, strict=False):
"""Generate a list of problems with a board
:param: board_json the board's dict to check
:param: strict enforce required keys
"""
errors = list(may_have_keys(BOARD_ALLOWED_KEYS, board_json))
if strict:
errors.extend(must_have_keys(BOARD_REQUIRED_KEYS, board_json))
errors.extend(check_extra_labels(board_json))
errors.extend(check_inherits(board_json))
return errors
def add_if(dict, key, val):
"""Add a value to a dict if it's non-empty"""
if val:
dict[key] = val
def _split_boards(resolution_order, tgt):
"""Split the resolution order between boards and mcus"""
mcus = []
boards = []
iterable = iter(resolution_order)
for name in iterable:
mcu_json = tgt.json_data[name]
if (len(list(check_mcu(mcu_json, True))) >
len(list(check_board(mcu_json, True)))):
boards.append(name)
else:
mcus.append(name)
break
mcus.extend(iterable)
mcus.reverse()
boards.reverse()
return mcus, boards
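# Hedged reading of the heuristic above: walking from the most derived name,
# a level is classified as a board while its dict produces more strict-MCU
# errors than strict-board errors; the first MCU-like level ends the board
# run and every remaining ancestor is treated as an MCU.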
MCU_FORMAT_STRING = {1: "MCU (%s) ->",
2: "Family (%s) -> MCU (%s) ->",
3: "Family (%s) -> SubFamily (%s) -> MCU (%s) ->"}
BOARD_FORMAT_STRING = {1: "Board (%s)",
2: "Module (%s) -> Board (%s)"}
def _generate_hierarchy_string(mcus, boards):
global_errors = []
if len(mcus) < 1:
global_errors.append("No MCUs found in hierarchy")
mcus_string = "??? ->"
elif len(mcus) > 3:
global_errors.append("No name for targets %s" % ", ".join(mcus[3:]))
mcus_string = MCU_FORMAT_STRING[3] % tuple(mcus[:3])
for name in mcus[3:]:
mcus_string += " ??? (%s) ->" % name
else:
mcus_string = MCU_FORMAT_STRING[len(mcus)] % tuple(mcus)
if len(boards) < 1:
global_errors.append("no boards found in hierarchy")
boards_string = "???"
elif len(boards) > 2:
global_errors.append("no name for targets %s" % ", ".join(boards[2:]))
boards_string = BOARD_FORMAT_STRING[2] % tuple(boards[:2])
for name in boards[2:]:
boards_string += " -> ??? (%s)" % name
else:
boards_string = BOARD_FORMAT_STRING[len(boards)] % tuple(boards)
return mcus_string + " " + boards_string, global_errors
def check_hierarchy(tgt):
"""Attempts to assign labels to the hierarchy"""
resolution_order = copy(tgt.resolution_order_names[:-1])
mcus, boards = _split_boards(resolution_order, tgt)
target_errors = {}
hierarchy_string, hierarchy_errors = _generate_hierarchy_string(mcus, boards)
to_ret = {"hierarchy": hierarchy_string}
add_if(to_ret, "hierarchy errors", hierarchy_errors)
for name in mcus[:-1]:
add_if(target_errors, name, list(check_mcu(tgt.json_data[name])))
if len(mcus) >= 1:
add_if(target_errors, mcus[-1],
list(check_mcu(tgt.json_data[mcus[-1]], True)))
for name in boards:
add_if(target_errors, name, list(check_board(tgt.json_data[name])))
if len(boards) >= 1:
add_if(target_errors, boards[-1],
list(check_board(tgt.json_data[boards[-1]], True)))
add_if(to_ret, "target errors", target_errors)
return to_ret
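# Shape of the result (illustrative only):
#   {"hierarchy": "Family (F) -> MCU (M) -> Board (B)",
#    "target errors": {"M": ["..."]}}
# where each key is added by add_if() only when its value is non-empty.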
PARSER = argparse.ArgumentParser(prog="targets/lint.py")
SUBPARSERS = PARSER.add_subparsers(title="Commands")
def subcommand(name, *args, **kwargs):
def __subcommand(command):
kwargs['description'] = command.__doc__
subparser = SUBPARSERS.add_parser(name, **kwargs)
for arg in args:
arg = dict(arg)
opt = arg['name']
del arg['name']
if isinstance(opt, basestring):
subparser.add_argument(opt, **arg)
else:
subparser.add_argument(*opt, **arg)
def _thunk(parsed_args):
argv = [arg['dest'] if 'dest' in arg else arg['name']
for arg in args]
argv = [(arg if isinstance(arg, basestring)
else arg[-1]).strip('-').replace('-', '_')
for arg in argv]
argv = {arg: vars(parsed_args)[arg] for arg in argv
if vars(parsed_args)[arg] is not None}
return command(**argv)
subparser.set_defaults(command=_thunk)
return command
return __subcommand
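# Hedged reading of the decorator above: each dict in *args describes one
# argparse argument for the new subparser, and _thunk maps the parsed
# attributes back onto the wrapped command's keyword arguments, so the
# "targets" subcommand below is ultimately invoked as targets_cmd(mcus=[...]).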
@subcommand("targets",
dict(name="mcus", nargs="+", metavar="MCU",
choices=TARGET_MAP.keys(), type=str.upper))
def targets_cmd(mcus=[]):
"""Find and print errors about specific targets"""
print dump_all([check_hierarchy(TARGET_MAP[m]) for m in mcus],
default_flow_style=False)
@subcommand("all-targets")
def all_targets_cmd():
"""Print all errors about all parts"""
print dump_all([check_hierarchy(m) for m in TARGET_MAP.values()],
default_flow_style=False)
@subcommand("orphans")
def orphans_cmd():
"""Find and print all orphan targets"""
orphans = Target.get_json_target_data().keys()
for tgt in TARGET_MAP.values():
for name in tgt.resolution_order_names:
if name in orphans:
orphans.remove(name)
if orphans:
print dump_all([orphans], default_flow_style=False)
return len(orphans)
def main():
"""entry point"""
options = PARSER.parse_args()
return options.command(options)
if __name__ == "__main__":
sys.exit(main())
|
apache-2.0
|
samuelhavron/heroku-buildpack-python
|
Python-3.4.3/Lib/lib2to3/main.py
|
30
|
11638
|
"""
Main program for 2to3.
"""
from __future__ import with_statement, print_function
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
A refactoring tool that can avoid overwriting its input files.
Prints output to stdout.
Output files can optionally be written to a different directory and or
have an extra file suffix appended to their name for use in situations
where you do not want to replace the input files.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
input_base_dir='', output_dir='', append_suffix=''):
"""
Args:
fixers: A list of fixers to import.
options: A dict with RefactoringTool configuration.
explicit: A list of fixers to run even if they are explicit.
nobackups: If true no backup '.bak' files will be created for those
files that are being refactored.
show_diffs: Should diffs of the refactoring be printed to stdout?
input_base_dir: The base directory for all input files. This class
will strip this path prefix off of filenames before substituting
it with output_dir. Only meaningful if output_dir is supplied.
All files processed by refactor() must start with this path.
output_dir: If supplied, all converted files will be written into
this directory tree instead of input_base_dir.
append_suffix: If supplied, all files output by this tool will have
this appended to their filename. Useful for changing .py to
.py3 for example by passing append_suffix='3'.
"""
self.nobackups = nobackups
self.show_diffs = show_diffs
if input_base_dir and not input_base_dir.endswith(os.sep):
input_base_dir += os.sep
self._input_base_dir = input_base_dir
self._output_dir = output_dir
self._append_suffix = append_suffix
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
orig_filename = filename
if self._output_dir:
if filename.startswith(self._input_base_dir):
filename = os.path.join(self._output_dir,
filename[len(self._input_base_dir):])
else:
raise ValueError('filename %s does not start with the '
'input_base_dir %s' % (
filename, self._input_base_dir))
if self._append_suffix:
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except OSError as err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except OSError as err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
if orig_filename != filename:
# Preserve the file mode in the new output directory.
shutil.copymode(orig_filename, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print(line)
sys.stdout.flush()
else:
for line in diff_lines:
print(line)
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
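# Minimal usage sketch (assumed fixer name; not part of this module):
#   rt = StdoutRefactoringTool(["lib2to3.fixes.fix_print"], {}, [],
#                              nobackups=True, show_diffs=True)
#   rt.refactor(["example.py"], write=False, doctests_only=False)
#   rt.summarize()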
def warn(msg):
print("WARNING: %s" % (msg,), file=sys.stderr)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
parser.add_option("-o", "--output-dir", action="store", type="str",
default="", help="Put output files in this directory "
"instead of overwriting the input files. Requires -n.")
parser.add_option("-W", "--write-unchanged-files", action="store_true",
help="Also write files even if no changes were required"
" (useful with --output-dir); implies -w.")
parser.add_option("--add-suffix", action="store", type="str", default="",
help="Append this string to all output filenames."
" Requires -n if non-empty. "
"ex: --add-suffix='3' will generate .py3 files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if options.write_unchanged_files:
flags["write_unchanged_files"] = True
if not options.write:
warn("--write-unchanged-files/-W implies -w.")
options.write = True
# If we allowed these, the original files would be renamed to backup names
# but not replaced.
if options.output_dir and not options.nobackups:
parser.error("Can't use --output-dir/-o without -n.")
if options.add_suffix and not options.nobackups:
parser.error("Can't use --add-suffix without -n.")
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print("Available transformations for the -f/--fix option:")
for fixname in refactor.get_all_fix_names(fixer_pkg):
print(fixname)
if not args:
return 0
if not args:
print("At least one file or directory argument required.", file=sys.stderr)
print("Use --help to show usage.", file=sys.stderr)
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print("Can't write to stdin.", file=sys.stderr)
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
logger = logging.getLogger('lib2to3.main')
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
input_base_dir = os.path.commonprefix(args)
if (input_base_dir and not input_base_dir.endswith(os.sep)
and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed; their directory is the base.
        # os.path.commonprefix() is ignorant of path elements; this corrects
        # for that weird API.
input_base_dir = os.path.dirname(input_base_dir)
if options.output_dir:
input_base_dir = input_base_dir.rstrip(os.sep)
logger.info('Output in %r will mirror the input directory %r layout.',
options.output_dir, input_base_dir)
rt = StdoutRefactoringTool(
sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs,
input_base_dir=input_base_dir,
output_dir=options.output_dir,
append_suffix=options.add_suffix)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print("Sorry, -j isn't supported on this platform.",
file=sys.stderr)
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
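# Usage sketch (illustrative, not part of the module): the 2to3 console
# script drives this entry point; a programmatic call looks roughly like
# the following, where 'example.py' is a hypothetical input file.
#
#   import sys
#   from lib2to3.main import main
#   sys.exit(main("lib2to3.fixes", args=["-w", "example.py"]))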
|
mit
|
tensorflow/probability
|
tensorflow_probability/python/experimental/distributions/__init__.py
|
1
|
1702
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability experimental distributions package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.distributions.log_prob_ratio import log_prob_ratio
from tensorflow_probability.python.experimental.distributions import marginal_fns
from tensorflow_probability.python.experimental.distributions.increment_log_prob import IncrementLogProb
from tensorflow_probability.python.experimental.distributions.joint_density_coroutine import JointDensityCoroutine
from tensorflow_probability.python.experimental.distributions.joint_distribution_pinned import JointDistributionPinned
from tensorflow_probability.python.experimental.distributions.mvn_precision_factor_linop import MultivariateNormalPrecisionFactorLinearOperator
__all__ = [
'log_prob_ratio',
'IncrementLogProb',
'JointDensityCoroutine',
'JointDistributionPinned',
'marginal_fns',
'MultivariateNormalPrecisionFactorLinearOperator',
]
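# Usage sketch (illustrative; assumes a TensorFlow Probability build where
# JointDistributionCoroutine and experimental_pin behave as shown):
#
#   import tensorflow_probability as tfp
#   tfd = tfp.distributions
#   Root = tfd.JointDistributionCoroutine.Root
#
#   @tfd.JointDistributionCoroutine
#   def model():
#       z = yield Root(tfd.Normal(0., 1., name='z'))
#       yield tfd.Normal(z, 1., name='x')
#
#   # Pinning 'z' yields a JointDistributionPinned over the remaining parts.
#   pinned = model.experimental_pin(z=0.5)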
|
apache-2.0
|
Distrotech/intellij-community
|
python/lib/Lib/compiler/future.py
|
93
|
1839
|
"""Parser for future statements
"""
from compiler import ast, walk
def is_future(stmt):
"""Return true if statement is a well-formed future statement"""
if not isinstance(stmt, ast.From):
return 0
if stmt.modname == "__future__":
return 1
else:
return 0
class FutureParser:
features = ("nested_scopes", "generators", "division",
"absolute_import", "with_statement")
def __init__(self):
        self.found = {}  # dict used as a set of enabled feature names
def visitModule(self, node):
stmt = node.node
for s in stmt.nodes:
if not self.check_stmt(s):
break
def check_stmt(self, stmt):
if is_future(stmt):
for name, asname in stmt.names:
if name in self.features:
self.found[name] = 1
else:
raise SyntaxError, \
"future feature %s is not defined" % name
stmt.valid_future = 1
return 1
return 0
def get_features(self):
"""Return list of features enabled by future statements"""
return self.found.keys()
class BadFutureParser:
"""Check for invalid future statements"""
def visitFrom(self, node):
if hasattr(node, 'valid_future'):
return
if node.modname != "__future__":
return
raise SyntaxError, "invalid future statement " + repr(node)
def find_futures(node):
p1 = FutureParser()
p2 = BadFutureParser()
walk(node, p1)
walk(node, p2)
return p1.get_features()
if __name__ == "__main__":
import sys
from compiler import parseFile, walk
for file in sys.argv[1:]:
print file
tree = parseFile(file)
v = FutureParser()
walk(tree, v)
print v.found
print
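# Illustrative run (hypothetical input): for a module whose first statement
# is 'from __future__ import division', FutureParser collects the feature
# and the loop above prints {'division': 1}.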
|
apache-2.0
|
meteorcloudy/tensorflow
|
tensorflow/examples/adding_an_op/zero_out_3_test.py
|
110
|
1805
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 3 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_op_3
class ZeroOut3Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def testAttr(self):
with self.test_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])
def testNegative(self):
with self.test_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
result.eval()
def testLarge(self):
with self.test_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
with self.assertRaisesOpError("preserve_index out of range"):
result.eval()
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
ModdedPA/android_external_chromium_org
|
media/tools/constrained_network_server/traffic_control.py
|
186
|
12569
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traffic control library for constraining the network configuration on a port.
The traffic controller sets up a constrained network configuration on a port.
Traffic to the constrained port is forwarded to a specified server port.
"""
import logging
import os
import re
import subprocess
# The maximum bandwidth limit.
_DEFAULT_MAX_BANDWIDTH_KBIT = 1000000
class TrafficControlError(BaseException):
"""Exception raised for errors in traffic control library.
Attributes:
msg: User defined error message.
cmd: Command for which the exception was raised.
returncode: Return code of running the command.
stdout: Output of running the command.
stderr: Error output of running the command.
"""
def __init__(self, msg, cmd=None, returncode=None, output=None,
error=None):
BaseException.__init__(self, msg)
self.msg = msg
self.cmd = cmd
self.returncode = returncode
self.output = output
self.error = error
def CheckRequirements():
"""Checks if permissions are available to run traffic control commands.
Raises:
TrafficControlError: If permissions to run traffic control commands are not
available.
"""
if os.geteuid() != 0:
_Exec(['sudo', '-n', 'tc', '-help'],
msg=('Cannot run \'tc\' command. Traffic Control must be run as root '
'or have password-less sudo access to this command.'))
_Exec(['sudo', '-n', 'iptables', '-help'],
msg=('Cannot run \'iptables\' command. Traffic Control must be run '
'as root or have password-less sudo access to this command.'))
def CreateConstrainedPort(config):
"""Creates a new constrained port.
Imposes packet level constraints such as bandwidth, latency, and packet loss
on a given port using the specified configuration dictionary. Traffic to that
port is forwarded to a specified server port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
loss: Percentage of packets to drop (integer 0-100).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
_AddRootQdisc(config['interface'])
try:
_ConfigureClass('add', config)
_AddSubQdisc(config)
_AddFilter(config['interface'], config['port'])
_AddIptableRule(config['interface'], config['port'], config['server_port'])
except TrafficControlError as e:
logging.debug('Error creating constrained port %d.\nError: %s\n'
'Deleting constrained port.', config['port'], e.error)
DeleteConstrainedPort(config)
raise e
def DeleteConstrainedPort(config):
"""Deletes an existing constrained port.
Deletes constraints set on a given port and the traffic forwarding rule from
the constrained port to a specified server port.
The original constrained network configuration used to create the constrained
port must be passed in.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
try:
# Delete filters first so it frees the class.
_DeleteFilter(config['interface'], config['port'])
finally:
try:
# Deleting the class deletes attached qdisc as well.
_ConfigureClass('del', config)
finally:
_DeleteIptableRule(config['interface'], config['port'],
config['server_port'])
def TearDown(config):
"""Deletes the root qdisc and all iptables rules.
Args:
config: Constraint configuration dictionary, format:
interface: Network interface name (string).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface')
command = ['sudo', 'tc', 'qdisc', 'del', 'dev', config['interface'], 'root']
try:
_Exec(command, msg='Could not delete root qdisc.')
finally:
_DeleteAllIpTableRules()
def _CheckArgsExist(config, *args):
"""Check that the args exist in config dictionary and are not None.
Args:
config: Any dictionary.
*args: The list of key names to check.
Raises:
TrafficControlError: If any key name does not exist in config or is None.
"""
for key in args:
if key not in config.keys() or config[key] is None:
raise TrafficControlError('Missing "%s" parameter.' % key)
def _AddRootQdisc(interface):
"""Sets up the default root qdisc.
Args:
interface: Network interface name.
Raises:
TrafficControlError: If adding the root qdisc fails for a reason other than
it already exists.
"""
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle',
'1:', 'htb']
try:
_Exec(command, msg=('Error creating root qdisc. '
'Make sure you have root access'))
except TrafficControlError as e:
# Ignore the error if root already exists.
    if 'File exists' not in e.error:
raise e
def _ConfigureClass(option, config):
"""Adds or deletes a class and qdisc attached to the root.
The class specifies bandwidth, and qdisc specifies delay and packet loss. The
class ID is based on the config port.
Args:
option: Adds or deletes a class option [add|del].
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
"""
# Use constrained port as class ID so we can attach the qdisc and filter to
# it, as well as delete the class, using only the port number.
class_id = '1:%x' % config['port']
if 'bandwidth' not in config.keys() or not config['bandwidth']:
bandwidth = _DEFAULT_MAX_BANDWIDTH_KBIT
else:
bandwidth = config['bandwidth']
bandwidth = '%dkbit' % bandwidth
command = ['sudo', 'tc', 'class', option, 'dev', config['interface'],
'parent', '1:', 'classid', class_id, 'htb', 'rate', bandwidth,
'ceil', bandwidth]
_Exec(command, msg=('Error configuring class ID %s using "%s" command.' %
(class_id, option)))
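# Illustrative command assembled by _ConfigureClass for port 9000 (0x2328)
# at 256 kbit/s on a hypothetical interface 'eth0':
#   sudo tc class add dev eth0 parent 1: classid 1:2328 htb rate 256kbit ceil 256kbit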
def _AddSubQdisc(config):
"""Adds a qdisc attached to the class identified by the config port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
loss: Percentage of packets to drop (integer 0-100).
"""
port_hex = '%x' % config['port']
class_id = '1:%x' % config['port']
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'], 'parent',
class_id, 'handle', port_hex + ':0', 'netem']
# Check if packet-loss is set in the configuration.
if 'loss' in config.keys() and config['loss']:
loss = '%d%%' % config['loss']
command.extend(['loss', loss])
# Check if latency is set in the configuration.
if 'latency' in config.keys() and config['latency']:
latency = '%dms' % config['latency']
command.extend(['delay', latency])
_Exec(command, msg='Could not attach qdisc to class ID %s.' % class_id)
def _AddFilter(interface, port):
"""Redirects packets coming to a specified port into the constrained class.
Args:
interface: Interface name to attach the filter to (string).
port: Port number to filter packets with (integer 1-65535).
"""
class_id = '1:%x' % port
command = ['sudo', 'tc', 'filter', 'add', 'dev', interface, 'protocol', 'ip',
'parent', '1:', 'prio', '1', 'u32', 'match', 'ip', 'sport', port,
'0xffff', 'flowid', class_id]
_Exec(command, msg='Error adding filter on port %d.' % port)
def _DeleteFilter(interface, port):
"""Deletes the filter attached to the configured port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
"""
handle_id = _GetFilterHandleId(interface, port)
command = ['sudo', 'tc', 'filter', 'del', 'dev', interface, 'protocol', 'ip',
'parent', '1:0', 'handle', handle_id, 'prio', '1', 'u32']
_Exec(command, msg='Error deleting filter on port %d.' % port)
def _GetFilterHandleId(interface, port):
"""Searches for the handle ID of the filter identified by the config port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
Returns:
The handle ID.
Raises:
TrafficControlError: If handle ID was not found.
"""
command = ['sudo', 'tc', 'filter', 'list', 'dev', interface, 'parent', '1:']
output = _Exec(command, msg='Error listing filters.')
# Search for the filter handle ID associated with class ID '1:port'.
  handle_id_re = re.search(
      r'([0-9a-fA-F]{3}::[0-9a-fA-F]{3}).*(?=flowid 1:%x\s)' % port, output)
if handle_id_re:
return handle_id_re.group(1)
raise TrafficControlError(('Could not find filter handle ID for class ID '
'1:%x.') % port)
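# Illustrative 'tc filter list' fragment the regex above is meant to match
# (handle and flowid values are hypothetical):
#   filter parent 1: protocol ip pref 1 u32 fh 800::800 ... flowid 1:2328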
def _AddIptableRule(interface, port, server_port):
"""Forwards traffic from constrained port to a specified server port.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port to forward the packets to (integer 1-65535).
"""
# Preroute rules for accessing the port through external connections.
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
# Output rules for accessing the rule through localhost or 127.0.0.1
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteIptableRule(interface, port, server_port):
"""Deletes the iptable rule associated with specified port number.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port packets are forwarded to (integer 1-65535).
"""
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error deleting iptables rule for port %d.' % port)
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
  _Exec(command, msg='Error deleting iptables rule for port %d.' % port)
def _DeleteAllIpTableRules():
"""Deletes all iptables rules."""
command = ['sudo', 'iptables', '-t', 'nat', '-F']
_Exec(command, msg='Error deleting all iptables rules.')
def _Exec(command, msg=None):
"""Executes a command.
Args:
command: Command list to execute.
msg: Message describing the error in case the command fails.
Returns:
The standard output from running the command.
Raises:
TrafficControlError: If command fails. Message is set by the msg parameter.
"""
cmd_list = [str(x) for x in command]
cmd = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise TrafficControlError(msg, cmd, p.returncode, output, error)
return output.strip()
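# Usage sketch (illustrative; needs root or password-less sudo, and the
# interface, ports and constraint values below are hypothetical):
#
#   config = {
#       'interface': 'eth0',
#       'port': 9000,         # constrained port
#       'server_port': 8080,  # traffic to 9000 is redirected here
#       'bandwidth': 256,     # kbit/s
#       'latency': 100,       # ms
#       'loss': 5,            # percent
#   }
#   CheckRequirements()
#   CreateConstrainedPort(config)
#   try:
#       pass  # exercise the constrained port here
#   finally:
#       DeleteConstrainedPort(config)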
|
bsd-3-clause
|
ezietsman/msc-thesis
|
images/august_phot/S7651/trailed_phot_FT.py
|
1
|
1842
|
# script to calculate trailed FT of the lightcurve
import astronomy as ast
import pylab as pl
import scipy.stats as sci
X = pl.load('S7651_FF_norm.dat')
x = X[:,0]
y = X[:,1]
z = X[:,2]
N = len(x)
fitlength = 100
#x -= int(x[0])
# ephemeris
T0 = 2453964.3307097
P = 0.1545255
#x = (x - T0) / P
ft = []
date = []
peaks = []
f0 = 3000
f1 = 4000
for i in range(0,N,int(fitlength/2.0)):
if i + fitlength/2.0 <= len(x):
print 'somewhere'
date.append(pl.average(x[i:i + fitlength]))
f,a = ast.signal.dft(x[i:i+fitlength],y[i:i+fitlength],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
# / len(x[i:i+fitlength]))
print i, i+fitlength
else:
print 'finally'
#x = fitwave(y[i:len(t)+1],t[i:len(t)+1],freq)
f,a = ast.signal.dft(x[i:len(x)+1],y[i:len(x)+1],f0,f1,1)
ft.append(a)
#sort,argsort = sci.fastsort(a)
#peaks.append(f[argsort[-1]])
date.append(pl.average(x[i:-1]))# / len(x[i:-1]))
print i
print '\n\n\n\n',N
print pl.shape(ft)
pl.figure(figsize=(6,4))
## calculate phase
x = (x - T0) / P
date = (pl.array(date) - T0) / P
#lt = date < 1.2
#gt = date > 0.8
#date = date[gt*lt]
levels=pl.arange(0.0,0.004 ,0.0001)
im = pl.contourf(pl.array(ft).transpose(),levels=levels,extent=(date[0],date[-1],f0,f1),cmap=pl.cm.jet)
pl.colorbar(im,orientation='horizontal',shrink=1.0,ticks=list(pl.arange(min(levels),max(levels),0.00075)))
#pl.xlabel('HJD (+2453965)')
pl.xlabel('Orbital Phase')
pl.ylabel('Frequency (cycles/day)')
yt = pl.yticks()
pl.yticks(yt[0][1:-1])
#pl.xlim(0.8,1.2)
pl.subplots_adjust(bottom=0.34)
pl.show()
|
mit
|
etherkit/OpenBeacon2
|
macos/venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
|
26
|
51059
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME)
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME,
WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
                         any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
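    # Illustrative example (hypothetical name/version):
    #   distinfo_dirname('my-dist', '1.0') -> 'my_dist-1.0.dist-info'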
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        This is a generator: it yields every distribution that provides a
        matching *name* (and *version*, if one was specified).
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist)
else:
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
        :param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
                      version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
        else:
            # Derive the prefix from the algorithm name actually requested,
            # before the name is replaced by the hashlib constructor.
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful when
    dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.modules = []
self.finder = finder = resources.finder_for_path(path)
if finder is None:
raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find(LEGACY_METADATA_FILENAME)
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
r = finder.find('REQUESTED')
self.requested = r is not None
p = os.path.join(path, 'top_level.txt')
if os.path.exists(p):
with open(p, 'rb') as f:
data = f.read().decode('utf-8')
self.modules = data.splitlines()
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
tl_path = tl_data = None
if path.endswith('.egg'):
if os.path.isdir(path):
p = os.path.join(path, 'EGG-INFO')
meta_path = os.path.join(p, 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(p, 'requires.txt')
tl_path = os.path.join(p, 'top_level.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
tl_path = os.path.join(path, 'top_level.txt')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
# look for top-level modules in top_level.txt, if present
if tl_data is None:
if tl_path is not None and os.path.exists(tl_path):
with open(tl_path, 'rb') as f:
tl_data = f.read().decode('utf-8')
if not tl_data:
tl_data = []
else:
tl_data = tl_data.splitlines()
self.modules = tl_data
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
                        # otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
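    # Illustrative example: with dependencies a -> b -> c (a depends on b,
    # which depends on c), topological_sort() returns ([c, b, a], []);
    # members of a cycle such as a -> b -> a end up in the second element.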
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
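# A brief usage sketch for the two helpers above (`dist` must be a member
# of `dists`, otherwise DistlibException is raised):
#
#     needed = get_required_dists(dists, dist)     # what `dist` requires
#     needing = get_dependent_dists(dists, dist)   # what requires `dist`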
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
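# For example, a throwaway distribution for exercising the helpers above
# (the name and version are illustrative):
#
#     dist = make_dist('example-package', '1.0', summary='Demo distribution')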
|
gpl-3.0
|
citrix-openstack-build/ironic
|
ironic/openstack/common/rpc/dispatcher.py
|
1
|
7038
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Code for rpc message dispatching.
Messages that come in have a version number associated with them. RPC API
version numbers are in the form:
Major.Minor
For a given message with version X.Y, the receiver must be marked as able to
handle messages of version A.B, where:
A = X
B >= Y
The Major version number would be incremented for an almost completely new API.
The Minor version number would be incremented for backwards compatible changes
to an existing API. A backwards compatible change could be something like
adding a new method, adding an argument to an existing method (but not
requiring it), or changing the type for an existing argument (but still
handling the old type as well).
The conversion over to a versioned API must be done on both the client side and
server side of the API at the same time. However, as the code stands today,
there can be both versioned and unversioned APIs implemented in the same code
base.
EXAMPLES
========
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
API as an example. The client side is in nova/compute/rpcapi.py and the server
side is in nova/compute/manager.py.
Example 1) Adding a new method.
-------------------------------
Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that must
be implemented for the method to be supported. For example::
def get_host_uptime(self, ctxt, host):
topic = _compute_topic(self.topic, ctxt, host, None)
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
version='1.1')
In this case, version '1.1' is the first version that supported the
get_host_uptime() method.
Example 2) Adding a new parameter.
----------------------------------
Adding a new parameter to an rpc method can be made backwards compatible. The
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
The implementation of the method must not expect the parameter to be present.::
def some_remote_method(self, arg1, arg2, newarg=None):
# The code needs to deal with newarg=None for cases
# where an older client sends a message without it.
pass
On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.
"""
from ironic.openstack.common.rpc import common as rpc_common
from ironic.openstack.common.rpc import serializer as rpc_serializer
class RpcDispatcher(object):
"""Dispatch rpc messages according to the requested API version.
This class can be used as the top level 'manager' for a service. It
contains a list of underlying managers that have an API_VERSION attribute.
"""
def __init__(self, callbacks, serializer=None):
"""Initialize the rpc dispatcher.
:param callbacks: List of proxy objects that are an instance
of a class with rpc methods exposed. Each proxy
object should have an RPC_API_VERSION attribute.
:param serializer: The Serializer object that will be used to
deserialize arguments before the method call and
to serialize the result after it returns.
"""
self.callbacks = callbacks
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcDispatcher, self).__init__()
def _deserialize_args(self, context, kwargs):
"""Helper method called to deserialize args before dispatch.
This calls our serializer on each argument, returning a new set of
args that have been deserialized.
:param context: The request context
:param kwargs: The arguments to be deserialized
:returns: A new set of deserialized args
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.deserialize_entity(context,
arg)
return new_kwargs
def dispatch(self, ctxt, version, method, namespace, **kwargs):
"""Dispatch a message based on a requested version.
:param ctxt: The request context
:param version: The requested API version from the incoming message
:param method: The method requested to be called by the incoming
message.
:param namespace: The namespace for the requested method. If None,
the dispatcher will look for a method on a callback
object with no namespace set.
:param kwargs: A dict of keyword arguments to be passed to the method.
:returns: Whatever is returned by the underlying method that gets
called.
"""
if not version:
version = '1.0'
had_compatible = False
for proxyobj in self.callbacks:
# Check for namespace compatibility
try:
cb_namespace = proxyobj.RPC_API_NAMESPACE
except AttributeError:
cb_namespace = None
if namespace != cb_namespace:
continue
# Check for version compatibility
try:
rpc_api_version = proxyobj.RPC_API_VERSION
except AttributeError:
rpc_api_version = '1.0'
is_compatible = rpc_common.version_is_compatible(rpc_api_version,
version)
had_compatible = had_compatible or is_compatible
if not hasattr(proxyobj, method):
continue
if is_compatible:
kwargs = self._deserialize_args(ctxt, kwargs)
result = getattr(proxyobj, method)(ctxt, **kwargs)
return self.serializer.serialize_entity(ctxt, result)
if had_compatible:
raise AttributeError("No such RPC function '%s'" % method)
else:
raise rpc_common.UnsupportedRpcVersion(version=version)
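# A minimal wiring sketch (the manager class, its method, and `ctxt` are
# hypothetical; only RPC_API_VERSION and the dispatch() signature come from
# the code above):
#
#     class ComputeManager(object):
#         RPC_API_VERSION = '1.1'
#
#         def get_host_uptime(self, ctxt):
#             return 42
#
#     dispatcher = RpcDispatcher([ComputeManager()])
#     dispatcher.dispatch(ctxt, '1.1', 'get_host_uptime', None)  # -> 42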
|
apache-2.0
|
bitcraft/tailor
|
apps/service/session.py
|
1
|
7880
|
# -*- coding: utf-8 -*-
"""
countdown between images
submits tasks to be processed by mp queue
"""
import asyncio
import logging
import os
import re
import sys
import threading
import traceback
from io import BytesIO
from os.path import join
import pygame
import requests
from PIL import Image
from apps.service.async_helpers import timing_generator
from apps.service.worker import WorkerPool
from tailor.config import pkConfig
from tailor.plugins.composer import TemplateRenderer
logger = logging.getLogger("tailor.service")
# reduce lookups in to the PIL package namespace
pil_open = Image.open
# make sure pygame sound lib is working
pygame.init()
pygame.mixer.init()
def load_sound(filename):
path = join(pkConfig["paths"]["app_sounds"], filename)
return pygame.mixer.Sound(path)
class Session:
def __init__(self):
# the following attributes are used by the service_app,
# which will read them and send that data to the kiosk.
self.countdown_value_changed = threading.Event()
self.countdown_value = 0
self.finished = False
self.started = False
self.idle = False
self.sounds = dict()
for name, fn in pkConfig["sounds"].items():
self.sounds[name] = load_sound(fn)
async def countdown(self, duration):
""" countdown from whole seconds
"""
duration = int(duration)
for i in range(duration):
self.countdown_value = duration - i
if self.countdown_value < 4:
self.sounds["countdown-tick"].play()
await asyncio.sleep(1)
self.countdown_value = 0
async def render_template(self, root):
"""
:param root:
:return: Image
"""
# render the composite image (async)
renderer = TemplateRenderer()
composite = await renderer.render_all(root)
return composite
@staticmethod
def format_number(value):
"""
:type value: int
:return: str
"""
return "{:05d}".format(value)
def guess_image_extension(self, ext=None):
""" Get best guess file extension for the image
:param ext:
:return:
"""
if ext is None:
return pkConfig["compositor"]["filetype"]
# TODO: something better!
return "jpg"
def name_image(self, prefix, session, capture, ext=None):
""" Generate name for individual images
:param prefix:
:param session:
:param capture:
:return:
"""
ext = self.guess_image_extension(ext)
return "{}-{}-{}.{}".format(
prefix, self.format_number(session), self.format_number(capture), ext
)
def name_composite(self, prefix, session, ext=None):
""" Generate name for composite images
:param prefix:
:param session:
:return:
"""
ext = self.guess_image_extension(ext)
return "{}-{}.{}".format(prefix, self.format_number(session), ext)
def capture_path(self, session_id, capture_id, ext=None):
paths = pkConfig["paths"]
return join(
paths["event_originals"],
"original",
self.name_image("original", session_id, capture_id, ext),
)
@staticmethod
def determine_initial_capture_id():
""" ...
:return:
"""
# here be dragons
regex = re.compile(r"^(.*?)-(\d+)$")
try:
with open(pkConfig["paths"]["event_log"]) as fp:
for line in fp:
pass
root, ext = os.path.splitext(line)
match = regex.match(root)
if match:
root, i = match.groups()
return int(i) + 1
else:
return 0
except IOError:
return 0
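# mark_session_complete() below appends one composite filename per line to
# the event log, so the log's last line looks like "composite-00012.jpg"
# (illustrative); splitext() plus the regex recover 12, and the next
# session id becomes 13.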
@staticmethod
def mark_session_complete(filename):
def mark():
with open(pkConfig["paths"]["event_log"], "a") as fp:
fp.write(filename + "\n")
# TODO: make sure this retry loop can fail eventually
done = False
while not done:
try:
mark()
done = True
except IOError:
pass
@staticmethod
def convert_raw_to_pil(raw):
return pil_open(BytesIO(raw))
def play_capture_sound(self, final=False):
if final:
self.sounds["finish-session"].play()
else:
self.sounds["finish-capture"].play()
def get_timer(self, needed_captures):
""" Get generator used to wait between captures
:param needed_captures: number of images needed to capture
:rtype: generator
"""
countdown_time = pkConfig["session"]["countdown-time"]
extra_wait_time = pkConfig["session"]["extra-wait-time"]
return timing_generator(
countdown_time, needed_captures, countdown_time + extra_wait_time
)
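# A sketch of what this yields, inferred from the consuming loop in start()
# below (timing_generator lives in apps.service.async_helpers and is assumed
# to yield one (final, wait_time) pair per needed capture, with the last
# pair flagged final and given the longer wait):
#
#     for final, wait_time in self.get_timer(3):
#         ...  # e.g. (False, countdown), (False, countdown), (True, countdown + extra)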
async def start(self, camera, template_root):
""" new session
Take 4 photos
Each photo has 3 attempts to take a photo
If we get 4 photos, or 3 failed attempts, then exit
:param template_root: template graph
:param camera: camera object
"""
logger.debug("starting new session")
pool = WorkerPool()
pool.start_workers()
max_failures = pkConfig["session"]["retries"]
self.started = True
self.finished = False
needed_captures = template_root.needed_captures()
session_id = self.determine_initial_capture_id()
for capture_id, timer in enumerate(self.get_timer(needed_captures)):
final, wait_time = timer
await self.countdown(wait_time)
for attempt in range(max_failures):
try:
raw_image = await camera.download_capture()
break
except Exception:
self.sounds["error"].play()
traceback.print_exc(file=sys.stdout)
logger.debug("failed capture %s/3", attempt)
else:
raise RuntimeError("all capture attempts failed")
self.idle = True # indicate that picture is taken, getting ready for next
self.play_capture_sound(final)
# the template renderer expects to use pillow images
# add images to the template for rendering
image = self.convert_raw_to_pil(raw_image)
template_root.push_image(image)
path = self.capture_path(session_id, capture_id, "jpg")
pool.queue_data_save(raw_image, path)
self.idle = False # indicate that the camera is not busy
self.finished = final # indicate that the session has all required photos
paths = pkConfig["paths"]
composites_folder = paths["event_composites"]
composite_filename = self.name_composite("composite", session_id)
composite_path = join(composites_folder, "original", composite_filename)
composite_small_path = join(composites_folder, "small", composite_filename)
print_path = join(paths["event_prints"], composite_filename)
composite = await self.render_template(template_root)
pool.queue_image_save(composite, composite_path)
pool.queue_image_thumbnail(composite, composite_small_path)
pool.queue_image_pad_double(composite, print_path)
pool.wait_for_workers() # blocking!
# print the double
# TODO: not use the http service?
url = "http://localhost:5000/print/" + composite_filename
requests.get(url)
self.mark_session_complete(composite_filename)
return template_root
|
gpl-3.0
|
danieljaouen/ansible
|
test/units/modules/network/f5/test_bigiq_application_http.py
|
21
|
5443
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigiq_application_http import ApiParameters
from library.modules.bigiq_application_http import ModuleParameters
from library.modules.bigiq_application_http import ModuleManager
from library.modules.bigiq_application_http import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigiq_application_http import ApiParameters
from ansible.modules.network.f5.bigiq_application_http import ModuleParameters
from ansible.modules.network.f5.bigiq_application_http import ModuleManager
from ansible.modules.network.f5.bigiq_application_http import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
service_environment='bar',
servers=[
dict(
address='1.2.3.4',
port=8080
),
dict(
address='5.6.7.8',
port=8000
)
],
inbound_virtual=dict(
address='2.2.2.2',
netmask='255.255.255.255',
port=80
)
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.config_set_name == 'foo'
assert p.sub_path == 'foo'
assert p.http_profile == 'profile_http'
assert p.service_environment == 'bar'
assert len(p.servers) == 2
assert 'address' in p.servers[0]
assert 'port' in p.servers[0]
assert 'address' in p.servers[1]
assert 'port' in p.servers[1]
assert p.servers[0]['address'] == '1.2.3.4'
assert p.servers[0]['port'] == 8080
assert p.servers[1]['address'] == '5.6.7.8'
assert p.servers[1]['port'] == 8000
assert 'address' in p.inbound_virtual
assert 'netmask' in p.inbound_virtual
assert 'port' in p.inbound_virtual
assert p.inbound_virtual['address'] == '2.2.2.2'
assert p.inbound_virtual['netmask'] == '255.255.255.255'
assert p.inbound_virtual['port'] == 80
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='my description',
service_environment='bar',
servers=[
dict(
address='1.2.3.4',
port=8080
),
dict(
address='5.6.7.8',
port=8000
)
],
inbound_virtual=dict(
address='2.2.2.2',
netmask='255.255.255.255',
port=80
),
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.has_no_service_environment = Mock(return_value=False)
mm.wait_for_apply_template_task = Mock(return_value=True)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(side_effect=[False, True])
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'my description'
|
gpl-3.0
|
abusesa/abusehelper
|
abusehelper/core/roomgraph.py
|
1
|
9190
|
from __future__ import absolute_import
import os
import sys
import errno
import struct
import cPickle
import idiokit
import subprocess
import contextlib
import socket as native_socket
from idiokit import socket, select
from . import events, rules, taskfarm, bot
class _ConnectionLost(Exception):
pass
@contextlib.contextmanager
def wrapped_socket_errnos(*errnos):
try:
yield
except (native_socket.error, socket.SocketError) as error:
socket_errno = error.args[0]
if socket_errno in errnos:
raise _ConnectionLost(os.strerror(socket_errno))
raise
def _recvall_blocking(conn, amount):
data = []
while amount > 0:
with wrapped_socket_errnos(errno.ECONNRESET):
piece = conn.recv(amount)
if not piece:
raise _ConnectionLost("could not recv() all bytes")
data.append(piece)
amount -= len(piece)
return "".join(data)
def send_encoded(conn, obj):
msg_bytes = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
data = struct.pack("!I", len(msg_bytes)) + msg_bytes
with wrapped_socket_errnos(errno.ECONNRESET, errno.EPIPE):
conn.sendall(data)
def recv_decoded(sock):
length_bytes = _recvall_blocking(sock, 4)
length, = struct.unpack("!I", length_bytes)
msg_bytes = _recvall_blocking(sock, length)
return cPickle.loads(msg_bytes)
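# The framing is a 4-byte big-endian length prefix followed by the pickled
# payload, so a round trip over any connected stream socket works, e.g.:
#
#     a, b = native_socket.socketpair()
#     send_encoded(a, ("event", ("room@example", {})))
#     assert recv_decoded(b) == ("event", ("room@example", {}))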
@idiokit.stream
def _recvall_stream(sock, amount, timeout=None):
data = []
while amount > 0:
with wrapped_socket_errnos(errno.ECONNRESET):
piece = yield sock.recv(amount, timeout=timeout)
if not piece:
raise _ConnectionLost("could not recv() all bytes")
data.append(piece)
amount -= len(piece)
idiokit.stop("".join(data))
@idiokit.stream
def distribute_encode(socks):
writable = []
while True:
to_all, msg = yield idiokit.next()
msg_bytes = cPickle.dumps(msg, cPickle.HIGHEST_PROTOCOL)
data = struct.pack("!I", len(msg_bytes)) + msg_bytes
if to_all:
for sock in socks:
yield sock.sendall(data)
writable = []
else:
while not writable:
_, writable, _ = yield select.select((), socks, ())
writable = list(writable)
yield writable.pop().sendall(data)
@idiokit.stream
def collect_decode(socks):
readable = []
while True:
while not readable:
readable, _, _ = yield select.select(socks, (), ())
readable = list(readable)
sock = readable.pop()
length_bytes = yield _recvall_stream(sock, 4)
length, = struct.unpack("!I", length_bytes)
msg_bytes = yield _recvall_stream(sock, length)
yield idiokit.send(cPickle.loads(msg_bytes))
class RoomGraphBot(bot.ServiceBot):
concurrency = bot.IntParam("""
the number of worker processes used for rule matching
(default: %default)
""", default=1)
def __init__(self, *args, **keys):
bot.ServiceBot.__init__(self, *args, **keys)
self._rooms = taskfarm.TaskFarm(self._handle_room, grace_period=0.0)
self._srcs = {}
self._ready = idiokit.Event()
self._stats = {}
def _inc_stats(self, room, seen=0, sent=0):
seen_count, sent_count = self._stats.get(room, (0, 0))
self._stats[room] = seen_count + seen, sent_count + sent
@idiokit.stream
def _log_stats(self, interval=15.0):
while True:
yield idiokit.sleep(interval)
for room, (seen, sent) in self._stats.iteritems():
self.log.info(
u"Room {0}: seen {1}, sent {2} events".format(room, seen, sent),
event=events.Event({
"type": "room",
"service": self.bot_name,
"seen events": unicode(seen),
"sent events": unicode(sent),
"room": unicode(room)
})
)
self._stats.clear()
@idiokit.stream
def _distribute(self):
while True:
src, event, dsts = yield idiokit.next()
count = 0
for dst in dsts:
dst_room = self._rooms.get(dst)
if dst_room is not None:
count += 1
yield dst_room.send(event.to_elements())
if count > 0:
self._inc_stats(src, sent=1)
@idiokit.stream
def _handle_room(self, room_name):
room = yield self.xmpp.muc.join(room_name, self.bot_name)
distributor = yield self._ready.fork()
yield idiokit.pipe(
room,
idiokit.map(self._map, room_name),
distributor.fork(),
idiokit.Event()
)
def _map(self, elements, room_name):
if room_name not in self._srcs:
return
for event in events.Event.from_elements(elements):
self._inc_stats(room_name, seen=1)
yield False, ("event", (room_name, event))
@idiokit.stream
def session(self, _, src_room, dst_room, rule=None, **keys):
rule = rules.Anything() if rule is None else rules.rule(rule)
src_room = yield self.xmpp.muc.get_full_room_jid(src_room)
dst_room = yield self.xmpp.muc.get_full_room_jid(dst_room)
distributor = yield self._ready.fork()
yield distributor.send(True, ("inc_rule", (src_room, rule, dst_room)))
try:
self._srcs[src_room] = self._srcs.get(src_room, 0) + 1
try:
yield self._rooms.inc(src_room) | self._rooms.inc(dst_room)
finally:
self._srcs[src_room] = self._srcs[src_room] - 1
if self._srcs[src_room] <= 0:
del self._srcs[src_room]
finally:
distributor.send(True, ("dec_rule", (src_room, rule, dst_room)))
def _start_worker(self):
env = dict(os.environ)
env["ABUSEHELPER_SUBPROCESS"] = ""
# Find out the full package & module name. Don't refer to the
# variable __loader__ directly to keep flake8 (version 2.5.0)
# linter happy.
fullname = globals()["__loader__"].fullname
own_conn, other_conn = native_socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
try:
process = subprocess.Popen(
[sys.executable, "-m", fullname],
preexec_fn=os.setpgrp,
stdin=other_conn.fileno(),
close_fds=True,
env=env
)
try:
conn = socket.fromfd(own_conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM)
except:
process.terminate()
process.wait()
raise
finally:
own_conn.close()
other_conn.close()
return process, conn
@idiokit.stream
def main(self, _):
processes = []
connections = []
try:
for _ in xrange(self.concurrency):
process, connection = self._start_worker()
processes.append(process)
connections.append(connection)
if self.concurrency == 1:
self.log.info(u"Started 1 worker process")
else:
self.log.info(u"Started {0} worker processes".format(self.concurrency))
self._ready.succeed(distribute_encode(connections))
yield collect_decode(connections) | self._distribute() | self._log_stats()
finally:
for connection in connections:
yield connection.close()
for process in processes:
process.terminate()
for process in processes:
process.wait()
def roomgraph(conn):
srcs = {}
while True:
type_id, args = recv_decoded(conn)
if type_id == "event":
src, event = args
if src in srcs:
dsts = set(srcs[src].classify(event))
if dsts:
send_encoded(conn, (src, event, dsts))
elif type_id == "inc_rule":
src, rule, dst = args
if src not in srcs:
srcs[src] = rules.Classifier()
srcs[src].inc(rule, dst)
elif type_id == "dec_rule":
src, rule, dst = args
if src in srcs:
srcs[src].dec(rule, dst)
if srcs[src].is_empty():
del srcs[src]
else:
raise RuntimeError("unknown type id {0!r}".format(type_id))
if __name__ == "__main__":
if "ABUSEHELPER_SUBPROCESS" in os.environ:
conn = native_socket.fromfd(0, native_socket.AF_UNIX, native_socket.SOCK_STREAM)
try:
rfd, wfd = os.pipe()
os.dup2(rfd, 0)
os.close(rfd)
os.close(wfd)
conn.setblocking(True)
roomgraph(conn)
except _ConnectionLost:
pass
finally:
conn.close()
else:
RoomGraphBot.from_command_line().execute()
|
mit
|
wfxiang08/django185
|
django/contrib/gis/geos/prototypes/io.py
|
103
|
9413
|
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFunc('GEOSWKTReader_create')
wkt_reader_create.restype = WKT_READ_PTR
wkt_reader_destroy = GEOSFunc('GEOSWKTReader_destroy')
wkt_reader_destroy.argtypes = [WKT_READ_PTR]
wkt_reader_read = GEOSFunc('GEOSWKTReader_read')
wkt_reader_read.argtypes = [WKT_READ_PTR, c_char_p]
wkt_reader_read.restype = GEOM_PTR
wkt_reader_read.errcheck = check_geom
# WKTWriter routines
wkt_writer_create = GEOSFunc('GEOSWKTWriter_create')
wkt_writer_create.restype = WKT_WRITE_PTR
wkt_writer_destroy = GEOSFunc('GEOSWKTWriter_destroy')
wkt_writer_destroy.argtypes = [WKT_WRITE_PTR]
wkt_writer_write = GEOSFunc('GEOSWKTWriter_write')
wkt_writer_write.argtypes = [WKT_WRITE_PTR, GEOM_PTR]
wkt_writer_write.restype = geos_char_p
wkt_writer_write.errcheck = check_string
try:
wkt_writer_get_outdim = GEOSFunc('GEOSWKTWriter_getOutputDimension')
wkt_writer_get_outdim.argtypes = [WKT_WRITE_PTR]
wkt_writer_get_outdim.restype = c_int
wkt_writer_set_outdim = GEOSFunc('GEOSWKTWriter_setOutputDimension')
wkt_writer_set_outdim.argtypes = [WKT_WRITE_PTR, c_int]
except AttributeError:
# GEOSWKTWriter_get/setOutputDimension has been introduced in GEOS 3.3.0
# Always return 2 if not available
wkt_writer_get_outdim = lambda ptr: 2
wkt_writer_set_outdim = lambda ptr, dim: None
# WKBReader routines
wkb_reader_create = GEOSFunc('GEOSWKBReader_create')
wkb_reader_create.restype = WKB_READ_PTR
wkb_reader_destroy = GEOSFunc('GEOSWKBReader_destroy')
wkb_reader_destroy.argtypes = [WKB_READ_PTR]
def wkb_read_func(func):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
take Python strings directly as parameters. Python does not distinguish
signed from unsigned characters, so this is not a problem.
func.argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
wkb_reader_read = wkb_read_func(GEOSFunc('GEOSWKBReader_read'))
wkb_reader_read_hex = wkb_read_func(GEOSFunc('GEOSWKBReader_readHEX'))
# WKBWriter routines
wkb_writer_create = GEOSFunc('GEOSWKBWriter_create')
wkb_writer_create.restype = WKB_WRITE_PTR
wkb_writer_destroy = GEOSFunc('GEOSWKBWriter_destroy')
wkb_writer_destroy.argtypes = [WKB_WRITE_PTR]
# WKB Writing prototypes.
def wkb_write_func(func):
func.argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
func.restype = c_uchar_p
func.errcheck = check_sized_string
return func
wkb_writer_write = wkb_write_func(GEOSFunc('GEOSWKBWriter_write'))
wkb_writer_write_hex = wkb_write_func(GEOSFunc('GEOSWKBWriter_writeHEX'))
# WKBWriter property getter/setter prototypes.
def wkb_writer_get(func, restype=c_int):
func.argtypes = [WKB_WRITE_PTR]
func.restype = restype
return func
def wkb_writer_set(func, argtype=c_int):
func.argtypes = [WKB_WRITE_PTR, argtype]
return func
wkb_writer_get_byteorder = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getByteOrder'))
wkb_writer_set_byteorder = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setByteOrder'))
wkb_writer_get_outdim = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getOutputDimension'))
wkb_writer_set_outdim = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setOutputDimension'))
wkb_writer_get_include_srid = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getIncludeSRID'), restype=c_char)
wkb_writer_set_include_srid = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setIncludeSRID'), argtype=c_char)
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
def __del__(self):
# Cleaning up with the appropriate destructor.
if self._ptr:
self._destructor(self._ptr)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
_destructor = wkt_reader_destroy
ptr_type = WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
_destructor = wkb_reader_destroy
ptr_type = WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
_destructor = wkt_writer_destroy
ptr_type = WKT_WRITE_PTR
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
_destructor = wkb_writer_destroy
ptr_type = WKB_WRITE_PTR
def write(self, geom):
"Returns the WKB representation of the given geometry."
return six.memoryview(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
def _get_outdim(self):
return wkb_writer_get_outdim(self.ptr)
def _set_outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
outdim = property(_get_outdim, _set_outdim)
# Property for getting/setting the include srid flag.
def _get_include_srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
def _set_include_srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter()
thread_context.wkt_w.outdim = dim
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter()
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter()
thread_context.ewkb_w.srid = True
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
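# A minimal sketch of the intended use (GEOSGeometry is assumed; any object
# exposing a GEOS geometry pointer as `.ptr` behaves the same):
#
#     from django.contrib.gis.geos import GEOSGeometry
#     geom = GEOSGeometry('POINT (1 2)')
#     wkt_w().write(geom)       # WKT bytes for the point
#     wkb_w().write_hex(geom)   # hex-encoded WKB for the same point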
|
bsd-3-clause
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/trial/test/test_testcase.py
|
59
|
2043
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Direct unit tests for L{twisted.trial.unittest.SynchronousTestCase} and
L{twisted.trial.unittest.TestCase}.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase, TestCase
class TestCaseMixin(object):
"""
L{TestCase} tests.
"""
def setUp(self):
"""
Create a couple instances of C{MyTestCase}, each for the same test
method, to be used in the test methods of this class.
"""
self.first = self.MyTestCase('test_1')
self.second = self.MyTestCase('test_1')
def test_equality(self):
"""
In order for one test method to be runnable twice, two TestCase
instances with the same test method name must not compare as equal.
"""
self.assertTrue(self.first == self.first)
self.assertTrue(self.first != self.second)
self.assertFalse(self.first == self.second)
def test_hashability(self):
"""
In order for one test method to be runnable twice, two TestCase
instances with the same test method name should not have the same
hash value.
"""
container = {}
container[self.first] = None
container[self.second] = None
self.assertEqual(len(container), 2)
class SynchronousTestCaseTests(TestCaseMixin, SynchronousTestCase):
class MyTestCase(SynchronousTestCase):
"""
Some test methods which can be used to test behaviors of
L{SynchronousTestCase}.
"""
def test_1(self):
pass
# Yes, subclass SynchronousTestCase again. There are no interesting behaviors
# of self being tested below, only of self.MyTestCase.
class AsynchronousTestCaseTests(TestCaseMixin, SynchronousTestCase):
class MyTestCase(TestCase):
"""
Some test methods which can be used to test behaviors of
L{TestCase}.
"""
def test_1(self):
pass
|
gpl-3.0
|
defzzd/UserDataBase-Heroku
|
venv/Lib/site-packages/pip/_vendor/distlib/locators.py
|
191
|
46946
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
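# For example (a live index and network access assumed):
#
#     names = get_all_distribution_names()   # queries DEFAULT_INDEX
#     'pip' in names                         # -> True, on a full mirror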
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
posixpath.basename(t.path))
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours http:// URLs over https://, archives
from PyPI over those from other locations, and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
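# For instance, a scraped link such as
# 'https://example.com/packages/foo-1.0.tar.gz#sha256=deadbeef' would yield
# roughly (values illustrative; the split is delegated to split_filename()):
#
#     {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
#      'url': 'https://example.com/packages/foo-1.0.tar.gz',
#      'sha256_digest': 'deadbeef'}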
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a dictionary for a
specific version, which typically holds information gleaned from a filename or URL for an
archive of the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = self._get_digest(info)
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, info['url'])
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if versions:
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception:
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
result = versions[slist[-1]]
if result and r.extras:
result.extras = r.extras
self.matcher = None
return result
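# A usage sketch (the index URL and requirement are illustrative; any
# concrete locator subclass works):
#
#     locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
#     dist = locator.locate('requests (>= 2.0)')
#     if dist is not None:
#         print(dist.name, dist.version)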
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
urls = d['urls']
if urls:
info = urls[0]
md.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[md.version] = dist
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile(r"""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError:
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e:
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e:
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path):
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
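# Hedged usage sketch for DirectoryLocator; the path and project name are
# placeholders, not real values.
#
#   loc = DirectoryLocator('/path/to/archives', recursive=False)
#   print(loc.get_distribution_names())   # names parsed from archive filenames
#   dist = loc.locate('someproject')      # newest matching Distribution, or None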
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
return result
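# Hedged sketch: because JSONLocator reads extended metadata directly, the
# dependencies of a located distribution are known without downloading any
# archive. 'someproject' is a placeholder name.
#
#   loc = JSONLocator()
#   dist = loc.locate('someproject')
#   if dist:
#       print(dist.metadata.dependencies)   # filled in from the metadata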
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {}
else:
result = { dist.version: dist }
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
result.update(d)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
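# Worked example (illustrative values) of the merge=False behaviour described
# in the issue #18 comment above: if the first locator only knows foo (1.0)
# but the requirement is foo (>= 2.0), _get_project pretends it found nothing
# so that the next locator is still consulted.
#
#   locator = AggregatingLocator(
#       DirectoryLocator('/old/archives'),   # assume it has only foo 1.0
#       SimpleScrapingLocator('https://pypi.python.org/simple/'),
#       scheme='legacy')
#   dist = locator.locate('foo (>= 2.0)')    # falls through to the scraper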
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
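# Minimal usage sketch for the convenience function bound above; the
# requirement string is an example only.
#
#   from distlib.locators import locate
#   dist = locate('sampleproject (>= 1.0)')  # a Distribution, or None
#   if dist is not None:
#       print(dist.name_and_version)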
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other, unmatched))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`requested` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
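# Hedged end-to-end sketch for DependencyFinder.find(); the requirement name
# is an arbitrary example.
#
#   finder = DependencyFinder()              # uses default_locator
#   dists, problems = finder.find('sampleproject', meta_extras=[':test:'])
#   for dist in dists:
#       print(dist.name_and_version, dist.build_time_dependency)
#   for problem in problems:
#       print(problem)                       # e.g. ('unsatisfied', requirement)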
|
mit
|
fr34k8/paimei
|
console/modules/_PAIMEIexplorer/PIDAModulesListCtrl.py
|
6
|
6655
|
#
# PaiMei
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: PIDAModulesListCtrl.py 194 2007-04-05 15:31:53Z cameron $
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
'''
import wx
import os
import sys
import time
from wx.lib.mixins.listctrl import ColumnSorterMixin
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
sys.path.append("..")
import pida
class PIDAModulesListCtrl (wx.ListCtrl, ListCtrlAutoWidthMixin):
'''
Our custom list control containing loaded pida modules.
'''
def __init__(self, parent, id, pos=None, size=None, style=None, top=None):
wx.ListCtrl.__init__(self, parent, id, style=wx.LC_REPORT | wx.SIMPLE_BORDER | wx.LC_HRULES )
self.top=top
ListCtrlAutoWidthMixin.__init__(self)
self.InsertColumn(0, "# Func")
self.InsertColumn(1, "# BB")
self.InsertColumn(2, "PIDA Module")
####################################################################################################################
def on_activated (self, event):
'''
Load the PIDA module into the browser tree ctrl.
'''
idx = self.GetFirstSelected()
module = self.GetItem(idx, 2).GetText()
self.top.explorer.load_module(module)
####################################################################################################################
def on_add_module (self, event):
'''
Load a PIDA module into memory.
'''
dlg = wx.FileDialog( \
self, \
message = "Select PIDA module", \
defaultDir = os.getcwd(), \
defaultFile = "", \
wildcard = "*.PIDA", \
style = wx.OPEN | wx.CHANGE_DIR | wx.MULTIPLE \
)
if dlg.ShowModal() != wx.ID_OK:
return
for path in dlg.GetPaths():
try:
module_name = path[path.rfind("\\")+1:path.lower().rfind(".pida")].lower()  # match the extension case-insensitively
if self.top.pida_modules.has_key(module_name):
self.top.err("Module %s already loaded ... skipping." % module_name)
continue
# deprecated - replaced by progress dialog.
#busy = wx.BusyInfo("Loading %s ... stand by." % module_name)
#wx.Yield()
start = time.time()
module = pida.load(path, progress_bar="wx")
if not module:
self.top.msg("Loading of PIDA module '%s' cancelled by user." % module_name)
return
else:
self.top.pida_modules[module_name] = module
self.top.msg("Loaded PIDA module '%s' in %.2f seconds." % (module_name, round(time.time() - start, 3)))
# determine the function and basic block counts for this module.
function_count = len(self.top.pida_modules[module_name].nodes)
basic_block_count = 0
for function in self.top.pida_modules[module_name].nodes.values():
basic_block_count += len(function.nodes)
idx = len(self.top.pida_modules) - 1
self.InsertStringItem(idx, "")
self.SetStringItem(idx, 0, "%d" % function_count)
self.SetStringItem(idx, 1, "%d" % basic_block_count)
self.SetStringItem(idx, 2, module_name)
self.SetColumnWidth(2, wx.LIST_AUTOSIZE)
except:
self.top.err("FAILED LOADING MODULE: %s. Possibly corrupt or version mismatch?" % module_name)
if self.top.pida_modules.has_key(module_name):
del(self.top.pida_modules[module_name])
####################################################################################################################
def on_right_click (self, event):
'''
When an item in the PIDA module list is right clicked, display a context menu.
'''
if getattr(self, "x", None) is None or getattr(self, "y", None) is None:
return
# we only have to do this once, that is what the hasattr() check is for.
if not hasattr(self, "right_click_popup_remove"):
self.right_click_popup_remove = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_right_click_popup_remove, id=self.right_click_popup_remove)
# make a menu.
menu = wx.Menu()
menu.Append(self.right_click_popup_remove, "Remove")
self.PopupMenu(menu, (self.x, self.y))
menu.Destroy()
####################################################################################################################
def on_right_click_popup_remove (self, event):
'''
Right click event handler for popup remove menu selection.
'''
idx = self.GetFirstSelected()
module = self.GetItem(idx, 2).GetText()
del(self.top.pida_modules[module])
self.DeleteItem(idx)
####################################################################################################################
def on_right_down (self, event):
'''
Grab the x/y coordinates when the right mouse button is clicked.
'''
self.x = event.GetX()
self.y = event.GetY()
item, flags = self.HitTest((self.x, self.y))
if flags & wx.LIST_HITTEST_ONITEM:
self.Select(item)
else:
self.x = None
self.y = None
|
gpl-2.0
|
sencha/chromium-spacewalk
|
tools/deep_memory_profiler/lib/pageframe.py
|
123
|
4340
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import struct
LOGGER = logging.getLogger('dmprof')
class PageFrame(object):
"""Represents a pageframe and maybe its shared count."""
def __init__(self, pfn, size, pagecount, start_truncated, end_truncated):
self._pfn = pfn
self._size = size
self._pagecount = pagecount
self._start_truncated = start_truncated
self._end_truncated = end_truncated
def __str__(self):
result = str()
if self._start_truncated:
result += '<'
result += '%06x#%d' % (self._pfn, self._pagecount)
if self._end_truncated:
result += '>'
return result
def __repr__(self):
return str(self)
@staticmethod
def parse(encoded_pfn, size):
start = 0
end = len(encoded_pfn)
end_truncated = False
if encoded_pfn.endswith('>'):
end = len(encoded_pfn) - 1
end_truncated = True
pagecount_found = encoded_pfn.find('#')
pagecount = None
if pagecount_found >= 0:
encoded_pagecount = 'AAA' + encoded_pfn[pagecount_found+1 : end]
pagecount = struct.unpack(
'>I', '\x00' + encoded_pagecount.decode('base64'))[0]
end = pagecount_found
start_truncated = False
if encoded_pfn.startswith('<'):
start = 1
start_truncated = True
pfn = struct.unpack(
'>I', '\x00' + (encoded_pfn[start:end]).decode('base64'))[0]
return PageFrame(pfn, size, pagecount, start_truncated, end_truncated)
@property
def pfn(self):
return self._pfn
@property
def size(self):
return self._size
def set_size(self, size):
self._size = size
@property
def pagecount(self):
return self._pagecount
@property
def start_truncated(self):
return self._start_truncated
@property
def end_truncated(self):
return self._end_truncated
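# Worked example (made-up values) of the encoding handled by PageFrame.parse()
# above: a PFN is four base64 characters decoded behind a zero byte into a
# big-endian 32-bit integer; '#' introduces a pagecount encoded the same way
# with an implicit 'AAA' prefix; '<' and '>' flag truncation at either end.
#
#   PageFrame.parse('AAAC', 4096)      # pfn 2, no pagecount
#   PageFrame.parse('<AAAB#B>', 4096)  # pfn 1, pagecount 1, both ends truncated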
class PFNCounts(object):
"""Represents counts of PFNs in a process."""
_PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$')
def __init__(self, path, modified_time):
matched = self._PATH_PATTERN.match(path)
if matched:
self._pid = int(matched.group(2))
else:
self._pid = 0
self._command_line = ''
self._pagesize = 4096
self._path = path
self._pfn_meta = ''
self._pfnset = {}
self._reason = ''
self._time = modified_time
@staticmethod
def load(path, log_header='Loading PFNs from a heap profile dump: '):
pfnset = PFNCounts(path, float(os.stat(path).st_mtime))
LOGGER.info('%s%s' % (log_header, path))
with open(path, 'r') as pfnset_f:
pfnset.load_file(pfnset_f)
return pfnset
@property
def path(self):
return self._path
@property
def pid(self):
return self._pid
@property
def time(self):
return self._time
@property
def reason(self):
return self._reason
@property
def iter_pfn(self):
for pfn, count in self._pfnset.iteritems():
yield pfn, count
def load_file(self, pfnset_f):
prev_pfn_end_truncated = None
for line in pfnset_f:
line = line.strip()
if line.startswith('GLOBAL_STATS:') or line.startswith('STACKTRACES:'):
break
elif line.startswith('PF: '):
for encoded_pfn in line[3:].split():
page_frame = PageFrame.parse(encoded_pfn, self._pagesize)
if page_frame.start_truncated and (
not prev_pfn_end_truncated or
prev_pfn_end_truncated != page_frame.pfn):
LOGGER.error('Broken page frame number: %s.' % encoded_pfn)
self._pfnset[page_frame.pfn] = self._pfnset.get(page_frame.pfn, 0) + 1
if page_frame.end_truncated:
prev_pfn_end_truncated = page_frame.pfn
else:
prev_pfn_end_truncated = None
elif line.startswith('PageSize: '):
self._pagesize = int(line[10:])
elif line.startswith('PFN: '):
self._pfn_meta = line[5:]
elif line.startswith('PageFrame: '):
self._pfn_meta = line[11:]
elif line.startswith('Time: '):
self._time = float(line[6:])
elif line.startswith('CommandLine: '):
self._command_line = line[13:]
elif line.startswith('Reason: '):
self._reason = line[8:]
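# Illustrative fragment (made-up values) of the dump format consumed by
# load_file() above:
#
#   Time: 1378109997.0
#   CommandLine: /usr/bin/some_process
#   PageSize: 4096
#   PF: AAAB AAAC#B <AAAD
#   GLOBAL_STATS: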
|
bsd-3-clause
|
geminateCoder/Character-Archive-Website
|
Lib/site-packages/cffi/model.py
|
43
|
21110
|
import types, sys
import weakref
from .lock import allocate_lock
# type qualifiers
Q_CONST = 0x01
Q_RESTRICT = 0x02
Q_VOLATILE = 0x04
def qualify(quals, replace_with):
if quals & Q_CONST:
replace_with = ' const ' + replace_with.lstrip()
if quals & Q_VOLATILE:
replace_with = ' volatile ' + replace_with.lstrip()
if quals & Q_RESTRICT:
# It seems that __restrict is supported by gcc and msvc.
# If you hit some different compiler, add a #define in
# _cffi_include.h for it (and in its copies, documented there)
replace_with = ' __restrict ' + replace_with.lstrip()
return replace_with
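# Quick illustration of qualify() (not part of cffi itself):
#
#   qualify(Q_CONST, '*p')              == ' const *p'
#   qualify(Q_CONST | Q_VOLATILE, 'x')  == ' volatile const x'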
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
def get_c_name(self, replace_with='', context='a C file', quals=0):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
replace_with = replace_with.strip()
if replace_with:
if replace_with.startswith('*') and '&[' in result:
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
replace_with = qualify(quals, replace_with)
result = result.replace('&', replace_with)
if '$' in result:
from .ffiplatform import VerificationError
raise VerificationError(
"cannot generate '%s' in %s: unknown type name"
% (self._get_c_name(), context))
return result
def _get_c_name(self):
return self.c_name_with_marker.replace('&', '')
def has_c_name(self):
return '$' not in self._get_c_name()
def is_integer_type(self):
return False
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
except KeyError:
BType = self.build_backend_type(ffi, finishlist)
BType2 = ffi._cached_btypes.setdefault(self, BType)
assert BType2 is BType
return BType
def __repr__(self):
return '<%s>' % (self._get_c_name(),)
def _get_items(self):
return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._get_items() == other._get_items())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
_attrs_ = ()
def __init__(self):
self.c_name_with_marker = 'void&'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_void_type')
void_type = VoidType()
class BasePrimitiveType(BaseType):
pass
class PrimitiveType(BasePrimitiveType):
_attrs_ = ('name',)
ALL_PRIMITIVE_TYPES = {
'char': 'c',
'short': 'i',
'int': 'i',
'long': 'i',
'long long': 'i',
'signed char': 'i',
'unsigned char': 'i',
'unsigned short': 'i',
'unsigned int': 'i',
'unsigned long': 'i',
'unsigned long long': 'i',
'float': 'f',
'double': 'f',
'long double': 'f',
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
'uint16_t': 'i',
'int32_t': 'i',
'uint32_t': 'i',
'int64_t': 'i',
'uint64_t': 'i',
'int_least8_t': 'i',
'uint_least8_t': 'i',
'int_least16_t': 'i',
'uint_least16_t': 'i',
'int_least32_t': 'i',
'uint_least32_t': 'i',
'int_least64_t': 'i',
'uint_least64_t': 'i',
'int_fast8_t': 'i',
'uint_fast8_t': 'i',
'int_fast16_t': 'i',
'uint_fast16_t': 'i',
'int_fast32_t': 'i',
'uint_fast32_t': 'i',
'int_fast64_t': 'i',
'uint_fast64_t': 'i',
'intptr_t': 'i',
'uintptr_t': 'i',
'intmax_t': 'i',
'uintmax_t': 'i',
'ptrdiff_t': 'i',
'size_t': 'i',
'ssize_t': 'i',
}
def __init__(self, name):
assert name in self.ALL_PRIMITIVE_TYPES
self.name = name
self.c_name_with_marker = name + '&'
def is_char_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
def is_integer_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
def is_float_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_primitive_type', self.name)
class UnknownIntegerType(BasePrimitiveType):
_attrs_ = ('name',)
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def is_integer_type(self):
return True
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("integer type '%s' can only be used after "
"compilation" % self.name)
class UnknownFloatType(BasePrimitiveType):
_attrs_ = ('name', )
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("float type '%s' can only be used after "
"compilation" % self.name)
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis', 'abi')
def __init__(self, args, result, ellipsis, abi=None):
self.args = args
self.result = result
self.ellipsis = ellipsis
self.abi = abi
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
if abi is not None:
replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
# Corresponds to a C type like 'int(int)', which is the C type of
# a function, but not a pointer-to-function. The backend has no
# notion of such a type; it's used temporarily by parsing.
_base_pattern = '(&)(%s)'
is_raw_function = True
def build_backend_type(self, ffi, finishlist):
from . import api
raise api.CDefError("cannot render the type %r: it is a function "
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
abi_args = ()
if self.abi == "__stdcall":
if not self.ellipsis: # __stdcall ignored for variadic funcs
try:
abi_args = (ffi._backend.FFI_STDCALL,)
except AttributeError:
pass
return global_cache(self, ffi, 'new_function_type',
tuple(args), result, self.ellipsis, *abi_args)
def as_raw_function(self):
return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
_attrs_ = ('totype', 'quals')
def __init__(self, totype, quals=0):
self.totype = totype
self.quals = quals
extra = qualify(quals, " *&")
if totype.is_array_type:
extra = "(%s)" % (extra.lstrip(),)
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
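# Sketch of the '&' marker convention (illustrative): the marker stands for
# the spot where a declarator name would go, and get_c_name() substitutes it.
#
#   tp = PointerType(PrimitiveType('int'))
#   tp.c_name_with_marker                  == 'int *&'
#   tp.get_c_name('p')                     == 'int * p'
#   ArrayType(tp, 4).c_name_with_marker    == 'int *&[4]'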
voidp_type = PointerType(void_type)
def ConstPointerType(totype):
return PointerType(totype, Q_CONST)
const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
def __init__(self, totype, name, quals=0):
PointerType.__init__(self, totype, quals)
self.name = name
self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
_attrs_ = ('item', 'length')
is_array_type = True
def __init__(self, item, length):
self.item = item
self.length = length
#
if length is None:
brackets = '&[]'
elif length == '...':
brackets = '&[/*...*/]'
else:
brackets = '&[%s]' % length
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
def build_backend_type(self, ffi, finishlist):
if self.length == '...':
from . import api
raise api.CDefError("cannot render the type %r: unknown length" %
(self,))
self.item.get_cached_btype(ffi, finishlist) # force the item BType
BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
_attrs_ = ('name',)
forcename = None
def build_c_name_with_marker(self):
name = self.forcename or '%s %s' % (self.kind, self.name)
self.c_name_with_marker = name + '&'
def force_the_name(self, forcename):
self.forcename = forcename
self.build_c_name_with_marker()
def get_official_name(self):
assert self.c_name_with_marker.endswith('&')
return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = 0
partial = False
packed = False
def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.fldquals = fldquals
self.build_c_name_with_marker()
def has_anonymous_struct_fields(self):
if self.fldtypes is None:
return False
for name, type in zip(self.fldnames, self.fldtypes):
if name == '' and isinstance(type, StructOrUnion):
return True
return False
def enumfields(self):
fldquals = self.fldquals
if fldquals is None:
fldquals = (0,) * len(self.fldnames)
for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
self.fldbitsize, fldquals):
if name == '' and isinstance(type, StructOrUnion):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize, quals)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
fldquals = []
for name, type, bitsize, quals in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
fldquals.append(quals)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
self.fldquals = tuple(fldquals)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
#
self.completed = 1
#
if self.fldtypes is None:
pass # not completing it: it's an opaque struct
#
elif self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
sflags = 0
if self.packed:
sflags = 8 # SF_PACKED
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, sflags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length == '...':
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
from .ffiplatform import VerificationError
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
#
return global_cache(self, ffi, 'new_%s_type' % self.kind,
self.get_official_name(), key=self)
class StructType(StructOrUnion):
kind = 'struct'
class UnionType(StructOrUnion):
kind = 'union'
class EnumType(StructOrUnionOrEnum):
kind = 'enum'
partial = False
partial_resolved = False
def __init__(self, name, enumerators, enumvalues, baseinttype=None):
self.name = name
self.enumerators = enumerators
self.enumvalues = enumvalues
self.baseinttype = baseinttype
self.build_c_name_with_marker()
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
name = self.get_official_name()
self.forcename = '$' + name.replace(' ', '_')
def check_not_partial(self):
if self.partial and not self.partial_resolved:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
base_btype = self.build_baseinttype(ffi, finishlist)
return global_cache(self, ffi, 'new_enum_type',
self.get_official_name(),
self.enumerators, self.enumvalues,
base_btype, key=self)
def build_baseinttype(self, ffi, finishlist):
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
from . import api
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
import warnings
warnings.warn("%r has no values explicitly defined; next version "
"will refuse to guess which integer type it is "
"meant to be (unsigned/signed, int/long)"
% self._get_c_name())
smallest_value = largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
candidate2 = PrimitiveType("long")
else:
sign = 0
candidate1 = PrimitiveType("unsigned int")
candidate2 = PrimitiveType("unsigned long")
btype1 = candidate1.get_cached_btype(ffi, finishlist)
btype2 = candidate2.get_cached_btype(ffi, finishlist)
size1 = ffi.sizeof(btype1)
size2 = ffi.sizeof(btype2)
if (smallest_value >= ((-1) << (8*size1-1)) and
largest_value < (1 << (8*size1-sign))):
return btype1
if (smallest_value >= ((-1) << (8*size2-1)) and
largest_value < (1 << (8*size2-sign))):
return btype2
raise api.CDefError("%s values don't all fit into either 'long' "
"or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
if structname is None:
structname = '$%s' % name
tp = StructType(structname, None, None, None)
tp.force_the_name(name)
tp.origin = "unknown_type"
return tp
def unknown_ptr_type(name, structname=None):
if structname is None:
structname = '$$%s' % name
tp = StructType(structname, None, None, None)
return NamedPointerType(tp, name)
global_lock = allocate_lock()
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
return ffi._backend.__typecache[key]
except KeyError:
pass
except AttributeError:
# initialize the __typecache attribute, either at the module level
# if ffi._backend is a module, or at the class level if ffi._backend
# is some instance.
if isinstance(ffi._backend, types.ModuleType):
ffi._backend.__typecache = weakref.WeakValueDictionary()
else:
type(ffi._backend).__typecache = weakref.WeakValueDictionary()
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
cache = ffi._backend.__typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
cache[key] = res
return res
else:
return res1
def pointer_cache(ffi, BType):
return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
if e.args and type(e.args[0]) is str:
e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
|
cc0-1.0
|
ParuninPavel/lenta4_hack
|
config/settings/test.py
|
1
|
1941
|
"""
Test settings for lentach project.
- Used to run tests fast on the continuous integration server and locally
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
# Turn debug off so tests run faster
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = False
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# In-memory email backend stores messages in django.core.mail.outbox
# for unit testing purposes
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# CACHING
# ------------------------------------------------------------------------------
# Speed advantages of in-memory caching without having to run Memcached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# PASSWORD HASHING
# ------------------------------------------------------------------------------
# Use fast password hasher so tests run faster
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# TEMPLATE LOADERS
# ------------------------------------------------------------------------------
# Keep templates in memory so tests run faster
TEMPLATES[0]['OPTIONS']['loaders'] = [
['django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
], ],
]
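# Usage sketch (illustrative): point Django at this module when running the
# suite, e.g.
#
#   DJANGO_SETTINGS_MODULE=config.settings.test python manage.py test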
|
mit
|
cesargtz/YecoraOdoo
|
openerp/addons/test_impex/tests/test_load.py
|
350
|
44525
|
# -*- coding: utf-8 -*-
import json
import pkgutil
import unittest2
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
return dict(kwargs,
type=type, rows={'from': from_, 'to': to_}, record=record,
field=field, message=msg)
def moreaction(**kwargs):
return dict(kwargs,
type='ir.actions.act_window',
target='new',
view_mode='tree,form',
view_type='form',
views=[(False, 'tree'), (False, 'form')],
help=u"See all possible values")
def values(seq, field='value'):
return [item[field] for item in seq]
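# Illustrative: what the helpers above evaluate to.
#
#   message(u"oops", type='warning', from_=1, to_=1, record=1)
#   == {'type': 'warning', 'rows': {'from': 1, 'to': 1}, 'record': 1,
#       'field': 'value', 'message': u"oops"}
#
#   values([{'value': 1}, {'value': 2}]) == [1, 2]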
class ImporterCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(ImporterCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(ImporterCase, self).setUp()
self.model = self.registry(self.model_name)
self.registry('ir.model.data').clear_caches()
def import_(self, fields, rows, context=None):
return self.model.load(
self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
def read(self, fields=('value',), domain=(), context=None):
return self.model.read(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
fields=fields, context=context)
def browse(self, domain=(), context=None):
return self.model.browse(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
context=context)
def xid(self, record):
ModelData = self.registry('ir.model.data')
ids = ModelData.search(
self.cr, openerp.SUPERUSER_ID,
[('model', '=', record._name), ('res_id', '=', record.id)])
if ids:
d = ModelData.read(
self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
if d['module']:
return '%s.%s' % (d['module'], d['name'])
return d['name']
name = record.name_get()[0][1]
# fix dotted name_get results, otherwise xid lookups blow up
name = name.replace('.', '-')
ModelData.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'model': record._name,
'res_id': record.id,
'module': '__test__'
})
return '__test__.' + name
def add_translations(self, name, type, code, *tnx):
Lang = self.registry('res.lang')
if not Lang.search(self.cr, openerp.SUPERUSER_ID, [('code', '=', code)]):
Lang.create(self.cr, openerp.SUPERUSER_ID, {
'name': code,
'code': code,
'translatable': True,
'date_format': '%d.%m.%Y',
'decimal_point': ',',
})
Translations = self.registry('ir.translation')
for source, value in tnx:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'lang': code,
'type': type,
'src': source,
'value': value,
'state': 'translated',
})
class test_ids_stuff(ImporterCase):
model_name = 'export.integer'
def test_create_with_id(self):
result = self.import_(['.id', 'value'], [['42', '36']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': '.id',
'message': u"Unknown database identifier '42'",
}])
def test_create_with_xid(self):
result = self.import_(['id', 'value'], [['somexmlid', '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
'somexmlid',
self.xid(self.browse()[0]))
def test_update_with_id(self):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
self.assertEqual(
36,
self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
result = self.import_(['.id', 'value'], [[str(id), '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[42], # updated value to imported
values(self.read()))
def test_update_with_xid(self):
self.import_(['id', 'value'], [['somexmlid', '36']])
self.assertEqual([36], values(self.read()))
self.import_(['id', 'value'], [['somexmlid', '1234567']])
self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
model_name = 'export.boolean'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_exported(self):
result = self.import_(['value'], [['False'], ['True'], ])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
records = self.read()
self.assertEqual([
False,
True,
], values(records))
def test_falses(self):
for lang, source, value in [('fr_FR', 'no', u'non'),
('de_DE', 'no', u'nein'),
('ru_RU', 'no', u'нет'),
('nl_BE', 'false', u'vals'),
('lt_LT', 'false', u'klaidingas')]:
self.add_translations('test_import.py', 'code', lang, (source, value))
falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
[u'non'], # no, fr
[u'nein'], # no, de
[u'нет'], # no, ru
[u'vals'], # false, nl
[u'klaidingas'], # false, lt,
]
result = self.import_(['value'], falses)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(falses))
self.assertEqual([False] * len(falses), values(self.read()))
def test_trues(self):
trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
# Problem: OpenOffice (and probably excel) output localized booleans
['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
result = self.import_(['value'], trues)
self.assertEqual(len(result['ids']), 10)
self.assertEqual(result['messages'], [
message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
moreinfo=u"Use '1' for yes and '0' for no",
type='warning', from_=i, to_=i, record=i)
for i, v in enumerate(trues)
if v[0] not in ('true', 'yes', '1')
])
self.assertEqual(
[True] * 10,
values(self.read()))
class test_integer_field(ImporterCase):
model_name = 'export.integer'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678']
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-(2**31 - 1))],
[str(-(2**31))],
['-12345678']
])
self.assertEqual(len(result['ids']), 5)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678
], values(self.read()))
@mute_logger('openerp.sql_db', 'openerp.models')
def test_out_of_range(self):
result = self.import_(['value'], [[str(2**31)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
result = self.import_(['value'], [[str(-2**32)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
def test_nonsense(self):
result = self.import_(['value'], [['zorglub']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': 'value',
'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
}])
class test_float_field(ImporterCase):
model_name = 'export.float'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678'],
[str(2**33)],
['0.000001'],
])
self.assertEqual(len(result['ids']), 6)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678, 2.0**33, .000001
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-2**31 + 1)],
[str(-2**31)],
['-12345678'],
[str(-2**33)],
['-0.000001'],
])
self.assertEqual(len(result['ids']), 7)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
], values(self.read()))
def test_nonsense(self):
result = self.import_(['value'], [['foobar']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [
message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
model_name = 'export.string.bounded'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
result = self.import_(['value'], [
[u'foobar'],
[u'foobarbaz'],
[u'Með suð í eyrum við spilum endalaust'],
[u"People 'get' types. They use them all the time. Telling "
u"someone he can't pound a nail with a banana doesn't much "
u"surprise him."]
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
u"foobar",
u"foobarbaz",
u"Með suð í eyrum ",
u"People 'get' typ",
], values(self.read()))
class test_unbound_string_field(ImporterCase):
model_name = 'export.string'
def test_imported(self):
result = self.import_(['value'], [
[u'í dag viðrar vel til loftárása'],
# ackbar.jpg
[u"If they ask you about fun, you tell them – fun is a filthy"
u" parasite"]
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual([
u"í dag viðrar vel til loftárása",
u"If they ask you about fun, you tell them – fun is a filthy parasite"
], values(self.read()))
class test_required_string_field(ImporterCase):
model_name = 'export.string.required'
@mute_logger('openerp.sql_db', 'openerp.models')
def test_empty(self):
result = self.import_(['value'], [[]])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db', 'openerp.models')
def test_not_provided(self):
result = self.import_(['const'], [['12']])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
class test_text(ImporterCase):
model_name = 'export.text'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
result = self.import_(['value'], [[s]])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
model_name = 'export.selection'
translations_fr = [
("Foo", "tete"),
("Bar", "titi"),
("Qux", "toto"),
]
def test_imported(self):
result = self.import_(['value'], [
['Qux'],
['Bar'],
['Foo'],
['2'],
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([3, 2, 1, 2], values(self.read()))
def test_imported_translated(self):
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['toto'],
['tete'],
['titi'],
], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 3)
self.assertFalse(result['messages'])
self.assertEqual([3, 1, 2], values(self.read()))
result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
def test_invalid(self):
result = self.import_(['value'], [['Baz']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value 'Baz' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
result = self.import_(['value'], [[42]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value '42' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
model_name = 'export.selection.withdefault'
def test_empty(self):
""" Empty cells should set corresponding field to False
"""
result = self.import_(['value'], [['']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[False])
def test_default(self):
""" Non-provided cells should set corresponding field to default
"""
result = self.import_(['const'], [['42']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[2])
class test_selection_function(ImporterCase):
model_name = 'export.selection.function'
translations_fr = [
("Corge", "toto"),
("Grault", "titi"),
("Wheee", "tete"),
("Moog", "tutu"),
]
def test_imported(self):
""" import uses fields_get, so translates import label (may or may not
be good news) *and* serializes the selection function to reverse it:
import does not actually know that the selection field uses a function
"""
# NOTE: conflict between a value and a label => pick first
result = self.import_(['value'], [
['3'],
["Grault"],
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual(
[3, 1],
values(self.read()))
def test_translated(self):
""" Expects output of selection function returns translated labels
"""
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['titi'],
['tete'],
], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
self.assertEqual(values(self.read()), [1, 2])
result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
model_name = 'export.many2one'
def test_by_name(self):
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# get its name
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
result = self.import_(['value'], [
# import by name_get
[name1],
[name1],
[name2],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
# correct ids assigned to corresponding records
self.assertEqual([
(integer_id1, name1),
(integer_id1, name1),
(integer_id2, name2),],
values(self.read()))
def test_by_xid(self):
ExportInteger = self.registry('export.integer')
integer_id = ExportInteger.create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
xid = self.xid(ExportInteger.browse(
self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
result = self.import_(['value/id'], [[xid]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
result = self.import_(['value/.id'], [[integer_id]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_names(self):
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
# names should be the same
self.assertEqual(name1, name2)
result = self.import_(['value'], [[name2]])
self.assertEqual(
result['messages'],
[message(u"Found multiple matches for field 'unknown' (2 matches)",
type='warning')])
self.assertEqual(len(result['ids']), 1)
self.assertEqual([
(integer_id1, name1)
], values(self.read()))
def test_fail_by_implicit_id(self):
""" Can't implicitly import records by id
"""
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # name_search matches on names, so raw database ids are treated as
        # names and find nothing; there is no fallback to an id lookup.
result = self.import_(['value'], [
# import by id, without specifying it
[integer_id1],
[integer_id2],
[integer_id1],
])
self.assertEqual(result['messages'], [
message(u"No matching record found for name '%s' in field 'unknown'" % id,
from_=index, to_=index, record=index,
moreinfo=moreaction(res_model='export.integer'))
for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db')
def test_fail_id_mistype(self):
result = self.import_(['value/.id'], [["foo"]])
self.assertEqual(result['messages'], [
message(u"Invalid database id 'foo' for the field 'unknown'",
moreinfo=moreaction(res_model='ir.model.data',
domain=[('model','=','export.integer')]))
])
self.assertIs(result['ids'], False)
def test_sub_field(self):
""" Does not implicitly create the record, does not warn that you can't
import m2o subfields (at all)...
"""
result = self.import_(['value/value'], [['42']])
self.assertEqual(result['messages'], [
message(u"Can not create Many-To-One records indirectly, import "
u"the field separately")])
self.assertIs(result['ids'], False)
def test_fail_noids(self):
result = self.import_(['value'], [['nameisnoexist:3']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'nameisnoexist:3' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='export.integer'))])
self.assertIs(result['ids'], False)
result = self.import_(['value/id'], [['noxidhere']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidhere' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
result = self.import_(['value/.id'], [['66']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '66' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
def test_fail_multiple(self):
result = self.import_(
['value', 'value/id'],
[['somename', 'somexid']])
self.assertEqual(result['messages'], [message(
u"Ambiguous specification for field 'unknown', only provide one of "
u"name, external id or database id")])
self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
model_name = 'export.many2many'
    # apparently the one and only format that works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending on
    # whether the column is m2m/.id, m2m/id or plain m2m)
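    # A hedged illustration of those three column forms (the ids, external
    # ids and names below are made-up examples, not fixtures from this
    # suite):
    #   value/.id -> '1,2'                          (database ids)
    #   value/id  -> 'base.rec_a,base.rec_b'        (external ids)
    #   value     -> 'Record A,Record B'            (name_get names)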
def test_ids(self):
id1 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
id5 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
result = self.import_(['value/.id'], [
['%d,%d' % (id1, id2)],
['%d,%d,%d' % (id1, id3, id4)],
['%d,%d,%d' % (id1, id2, id3)],
['%d' % id5]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 4)
ids = lambda records: [record.id for record in records]
b = self.browse()
self.assertEqual(ids(b[0].value), [id1, id2])
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(ids(b[2].value), [id1, id2, id3])
self.assertEqual(values(b[2].value), [3, 44, 84])
def test_noids(self):
result = self.import_(['value/.id'], [['42']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '42' in field "
u"'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_xids(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
result = self.import_(['value/id'], [
['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
['%s' % self.xid(records[3])],
['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(values(b[2].value), [44, 84])
def test_noxids(self):
result = self.import_(['value/id'], [['noxidforthat']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidforthat' in field"
u" 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_names(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
name = lambda record: record.name_get()[0][1]
result = self.import_(['value'], [
['%s,%s' % (name(records[1]), name(records[2]))],
['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
['%s,%s' % (name(records[0]), name(records[3]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[1].value), [3, 44, 84])
self.assertEqual(values(b[2].value), [3, 9])
def test_nonames(self):
result = self.import_(['value'], [['wherethem2mhavenonames']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'wherethem2mhavenonames' in "
u"field 'unknown'", moreinfo=moreaction(
res_model='export.many2many.other'))])
self.assertIs(result['ids'], False)
def test_import_to_existing(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
xid = 'myxid'
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(len(b), 1)
        # TODO: is replacing the existing m2m values the correct behavior?
self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
model_name = 'export.one2many'
def test_name_get(self):
s = u'Java is a DSL for taking large XML files and converting them ' \
u'to stack traces'
result = self.import_(
['const', 'value'],
[['5', s]])
self.assertEqual(result['messages'], [message(
u"No matching record found for name '%s' in field 'unknown'" % s,
moreinfo=moreaction(res_model='export.one2many.child'))])
self.assertIs(result['ids'], False)
def test_single(self):
result = self.import_(['const', 'value/value'], [
['5', '63']
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.value), [63])
def test_multicore(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['6', '64'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b1, b2 = self.browse()
self.assertEqual(b1.const, 5)
self.assertEqual(values(b1.value), [63])
self.assertEqual(b2.const, 6)
self.assertEqual(values(b2.value), [64])
def test_multisub(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['', '64'],
['', '65'],
['', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
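    # Note: rows whose non-o2m cells are empty act as continuation rows;
    # their o2m values extend the previous record's one2many instead of
    # creating a new top-level record, which is why the import above
    # yields a single id with four children.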
def test_multi_subfields(self):
result = self.import_(['value/str', 'const', 'value/value'], [
['this', '5', '63'],
['is', '', '64'],
['the', '', '65'],
['rhythm', '', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
self.assertEqual(
values(b.value, 'str'),
'this is the rhythm'.split())
def test_link_inline(self):
""" m2m-style specification for o2ms
"""
id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', '%d,%d' % (id1, id2)]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link(self):
""" O2M relating to an existing record (update) force a LINK_TO as well
"""
O2M = self.registry('export.one2many.child')
id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', str(id1)],
['', str(id2)],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link_2(self):
O2M_c = self.registry('export.one2many.child')
id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id', 'value/value'], [
['42', str(id1), '1'],
['', str(id2), '2'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
self.assertEqual(values(b.value), [1, 2])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
model_name = 'export.one2many.multiple'
def test_multi_mixed(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', '22'],
['', '13', '23'],
['', '14', ''],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi_fullsplit(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', ''],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '21'],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
def test_bigfile(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
['name', 'mobile', 'email', 'image'],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_backlink(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
["name", "type", "street", "city", "country_id", "category_id",
"supplier", "customer", "is_company", "parent_id"],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_recursive_o2m(self):
""" The content of the o2m field's dict needs to go through conversion
as it may be composed of convertables or other relational fields
"""
self.registry('ir.model.data').clear_caches()
Model = self.registry('export.one2many.recursive')
result = Model.load(self.cr, openerp.SUPERUSER_ID,
['value', 'child/const', 'child/child1/str', 'child/child2/value'],
[
['4', '42', 'foo', '55'],
['', '43', 'bar', '56'],
['', '', 'baz', ''],
['', '55', 'qux', '57'],
['5', '99', 'wheee', ''],
['', '98', '', '12'],
],
context=None)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
self.assertEqual((b[0].value, b[1].value), (4, 5))
self.assertEqual([child.str for child in b[0].child[1].child1],
['bar', 'baz'])
self.assertFalse(len(b[1].child[1].child1))
self.assertEqual([child.value for child in b[1].child[1].child2],
[12])
class test_date(ImporterCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a date']])
self.assertEqual(result['messages'], [
message(u"'not really a date' does not seem to be a valid date "
u"for field 'unknown'",
moreinfo=u"Use the format '2012-12-31'")])
self.assertIs(result['ids'], False)
class test_datetime(ImporterCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a datetime']])
self.assertEqual(result['messages'], [
message(u"'not really a datetime' does not seem to be a valid "
u"datetime for field 'unknown'",
moreinfo=u"Use the format '2012-12-31 23:59:59'")])
self.assertIs(result['ids'], False)
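    @staticmethod
    def _naive_to_utc(naive, tzname):
        """ Illustrative sketch only (not called by the tests below, and
        assuming pytz is importable): localize a naive datetime in
        ``tzname`` and convert it to UTC, which is the conversion the tz
        tests exercise. e.g. a naive 2012-02-03 11:11:11 in
        'Pacific/Kiritimati' (UTC+1400) becomes 2012-02-02 21:11:11 UTC.
        """
        import pytz
        return pytz.timezone(tzname).localize(naive).astimezone(pytz.utc)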
def test_checktz1(self):
""" Imported date should be interpreted as being in the tz provided by
the context
"""
# write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
# context
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Hovd'})
# UTC+1400
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-02 21:11:11'])
# UTC-0930
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 20:41:11'])
def test_usertz(self):
""" If the context does not hold a timezone, the importing user's tz
should be used
"""
# UTC +1000
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Yakutsk'})
result = self.import_(
['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 01:11:11'])
def test_notz(self):
""" If there is no tz either in the context or on the user, falls back
to UTC
"""
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': False})
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
model_name = 'export.unique'
@mute_logger('openerp.sql_db')
def test_unique(self):
result = self.import_(['value'], [
['1'],
['1'],
['2'],
['3'],
['3'],
])
self.assertFalse(result['ids'])
self.assertEqual(result['messages'], [
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 1, 'to': 1},
record=1, field='value'),
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 4, 'to': 4},
record=4, field='value'),
])
|
agpl-3.0
|
jesramirez/odoo
|
addons/website_crm/controllers/main.py
|
10
|
5500
|
# -*- coding: utf-8 -*-
import base64
import werkzeug
import werkzeug.urls
from openerp import http, SUPERUSER_ID
from openerp.http import request
from openerp.tools.translate import _
class contactus(http.Controller):
def generate_google_map_url(self, street, city, city_zip, country_name):
url = "http://maps.googleapis.com/maps/api/staticmap?center=%s&sensor=false&zoom=8&size=298x298" % werkzeug.url_quote_plus(
'%s, %s %s, %s' % (street, city, city_zip, country_name)
)
return url
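    # e.g. generate_google_map_url('1 Main St', 'Springfield', '12345', 'USA')
    # returns the 298x298 static-map URL centered on the quoted address
    # '1+Main+St%2C+Springfield+12345%2C+USA'.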
@http.route(['/page/website.contactus', '/page/contactus'], type='http', auth="public", website=True)
def contact(self, **kwargs):
values = {}
for field in ['description', 'partner_name', 'phone', 'contact_name', 'email_from', 'name']:
if kwargs.get(field):
values[field] = kwargs.pop(field)
values.update(kwargs=kwargs.items())
return request.website.render("website.contactus", values)
def create_lead(self, request, values, kwargs):
""" Allow to be overrided """
cr, context = request.cr, request.context
return request.registry['crm.lead'].create(cr, SUPERUSER_ID, values, context=dict(context, mail_create_nosubscribe=True))
def preRenderThanks(self, values, kwargs):
""" Allow to be overrided """
company = request.website.company_id
return {
'google_map_url': self.generate_google_map_url(company.street, company.city, company.zip, company.country_id and company.country_id.name_get()[0][1] or ''),
'_values': values,
'_kwargs': kwargs,
}
def get_contactus_response(self, values, kwargs):
values = self.preRenderThanks(values, kwargs)
return request.website.render(kwargs.get("view_callback", "website_crm.contactus_thanks"), values)
@http.route(['/crm/contactus'], type='http', auth="public", website=True)
def contactus(self, **kwargs):
def dict_to_str(title, dictvar):
ret = "\n\n%s" % title
for field in dictvar:
ret += "\n%s" % field
return ret
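        # e.g. dict_to_str('Custom Fields: ', ['a: 1', 'b: 2'])
        # returns '\n\nCustom Fields: \na: 1\nb: 2'.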
        _TECHNICAL = ['show_info', 'view_from', 'view_callback']  # only used to drive behavior, never stored on the lead
        _BLACKLIST = ['id', 'create_uid', 'create_date', 'write_uid', 'write_date', 'user_id', 'active']  # never set directly, but may appear in the description
        _REQUIRED = ['name', 'contact_name', 'email_from', 'description']  # could be improved by reading required fields from the model
        post_file = []  # list of files to attach via ir_attachment once we have the lead id
        post_description = []  # info to append to the message
values = {}
for field_name, field_value in kwargs.items():
if hasattr(field_value, 'filename'):
post_file.append(field_value)
elif field_name in request.registry['crm.lead']._fields and field_name not in _BLACKLIST:
values[field_name] = field_value
            elif field_name not in _TECHNICAL:  # free-form or blacklisted fields (e.g. id) are appended to the description instead
                post_description.append("%s: %s" % (field_name, field_value))
if "name" not in kwargs and values.get("contact_name"): # if kwarg.name is empty, it's an error, we cannot copy the contact_name
values["name"] = values.get("contact_name")
        # field validation: check that every required crm.lead field is present
error = set(field for field in _REQUIRED if not values.get(field))
if error:
values = dict(values, error=error, kwargs=kwargs.items())
return request.website.render(kwargs.get("view_from", "website.contactus"), values)
values['medium_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'crm.crm_medium_website')
values['section_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'website.salesteam_website_sales')
# description is required, so it is always already initialized
if post_description:
values['description'] += dict_to_str(_("Custom Fields: "), post_description)
if kwargs.get("show_info"):
post_description = []
environ = request.httprequest.headers.environ
post_description.append("%s: %s" % ("IP", environ.get("REMOTE_ADDR")))
post_description.append("%s: %s" % ("USER_AGENT", environ.get("HTTP_USER_AGENT")))
post_description.append("%s: %s" % ("ACCEPT_LANGUAGE", environ.get("HTTP_ACCEPT_LANGUAGE")))
post_description.append("%s: %s" % ("REFERER", environ.get("HTTP_REFERER")))
values['description'] += dict_to_str(_("Environ Fields: "), post_description)
lead_id = self.create_lead(request, dict(values, user_id=False), kwargs)
values.update(lead_id=lead_id)
if lead_id:
for field_value in post_file:
attachment_value = {
'name': field_value.filename,
'res_name': field_value.filename,
'res_model': 'crm.lead',
'res_id': lead_id,
'datas': base64.encodestring(field_value.read()),
'datas_fname': field_value.filename,
}
request.registry['ir.attachment'].create(request.cr, SUPERUSER_ID, attachment_value, context=request.context)
return self.get_contactus_response(values, kwargs)
|
agpl-3.0
|
hfeeki/transifex
|
transifex/languages/migrations/0003_changed_rules_equations.py
|
3
|
20958
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.core.management import call_command
from django.db import models
class Migration(DataMigration):
depends_on = (
('resources', '0009_added_translation_wordcount',),
)
def forwards(self, orm):
"""
        Drop extra remaining translations after decreasing the nplurals of
some languages.
"""
def get_pluralrules_numbers(t):
"""
Clone of the method available in resources.models.Translation.
            This is needed because the south orm object does not have
            access to the model's class methods.
The arg `t` must be a Translation object.
"""
rules=[]
if t.rule_zero:
rules.append(0)
if t.rule_one:
rules.append(1)
if t.rule_two:
rules.append(2)
if t.rule_few:
rules.append(3)
if t.rule_many:
rules.append(4)
rules.append(5)
return rules
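        # e.g. a language with only rule_one and rule_few set yields
        # [1, 3, 5]; [-2:][0] below then picks 3, the highest concrete
        # rule, and translations with a larger rule (except 'other',
        # rule 5) are dropped.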
# Making sure languages are updated from the latest fixture
        # FIXME: Doesn't seem to work; the data is not being saved into the db. :/
call_command('txlanguages', '--import', verbosity=2)
        # Migration for languages that had their nplurals decreased:
        # remove the extra (unneeded) translations
LANGS = ['bs', 'hr', 'sr', 'uk']
for l in LANGS:
for lang in orm.Language.objects.filter(code__startswith=l):
rule = get_pluralrules_numbers(lang)[-2:][0]
orm['resources.Translation'].objects.filter(language=lang,
rule__gt=rule).exclude(rule=5).delete()
        # Migration for languages that had their nplurals increased:
        # remove all translations and add the 'other' one as a suggestion.
LANGS = ['ga']
for l in LANGS:
translations = orm['resources.Translation'].objects.filter(
language__code__startswith=l, source_entity__pluralized=True,
rule=5)
print translations
for t in translations:
sug, created = orm['suggestions.Suggestion'].objects.get_or_create(
string = t.string,
source_entity = t.source_entity,
language = t.language)
if created and t.user:
sug.user = t.user
sug.save()
orm['resources.Translation'].objects.filter(
language__code__startswith=l,
source_entity__pluralized=True).delete()
def backwards(self, orm):
raise Exception("Cannot reverse this migration.")
models = {
'actionlog.logentry': {
'Meta': {'ordering': "('-action_time',)", 'object_name': 'LogEntry'},
'action_time': ('django.db.models.fields.DateTimeField', [], {}),
'action_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'actionlogs'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'actionlogs'", 'null': 'True', 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'languages.language': {
'Meta': {'ordering': "('name',)", 'object_name': 'Language', 'db_table': "'translations_language'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'code_aliases': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rule_few': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rule_many': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rule_one': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rule_other': ('django.db.models.fields.CharField', [], {'default': "'everything'", 'max_length': '255'}),
'rule_two': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rule_zero': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'specialchars': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'notification.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'projects.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'anyone_submit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bug_tracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'feed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'long_description_html': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'maintainers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects_maintaining'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'outsource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'}),
'tags': ('tagging.fields.TagField', [], {})
},
'resources.resource': {
'Meta': {'ordering': "('_order',)", 'unique_together': "(('slug', 'project'),)", 'object_name': 'Resource'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accept_translations': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'i18n_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'null': 'True', 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'source_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storage.StorageFile']", 'null': 'True', 'blank': 'True'}),
'source_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['languages.Language']"}),
'total_entities': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wordcount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'resources.rlstats': {
'Meta': {'ordering': "('_order',)", 'unique_together': "(('resource', 'language'),)", 'object_name': 'RLStats'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['languages.Language']"}),
'last_committer': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'blank': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.Resource']"}),
'translated': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'translated_perc': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'translated_wordcount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'untranslated': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'untranslated_perc': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'resources.sourceentity': {
'Meta': {'ordering': "('_order',)", 'unique_together': "(('string_hash', 'context', 'resource'),)", 'object_name': 'SourceEntity'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'context': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'developer_comment': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'flags': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occurrences': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'pluralized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_entities'", 'to': "orm['resources.Resource']"}),
'string': ('django.db.models.fields.TextField', [], {}),
'string_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'resources.template': {
'Meta': {'ordering': "['resource']", 'object_name': 'Template'},
'content': ('transifex.txcommon.db.models.CompressedTextField', [], {'null': 'False', 'blank': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_file_template'", 'unique': 'True', 'to': "orm['resources.Resource']"})
},
'resources.translation': {
'Meta': {'ordering': "('_order',)", 'unique_together': "(('source_entity', 'language', 'rule'),)", 'object_name': 'Translation'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['languages.Language']", 'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rule': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'source_entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['resources.SourceEntity']"}),
'string': ('django.db.models.fields.TextField', [], {}),
'string_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'wordcount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'storage.storagefile': {
'Meta': {'object_name': 'StorageFile'},
'bound': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['languages.Language']", 'null': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_strings': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'suggestions.suggestion': {
'Meta': {'ordering': "('_order',)", 'unique_together': "(('source_entity', 'language', 'string_hash'),)", 'object_name': 'Suggestion'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['languages.Language']", 'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}),
'source_entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggestions'", 'to': "orm['resources.SourceEntity']"}),
'string': ('django.db.models.fields.TextField', [], {}),
'string_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'suggestions.vote': {
'Meta': {'unique_together': "(('suggestion', 'user'),)", 'object_name': 'Vote'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'suggestion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['suggestions.Suggestion']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote_type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['resources', 'suggestions', 'languages']
|
gpl-2.0
|
doot/CouchPotatoServer
|
libs/rsa/util.py
|
115
|
2817
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utility functions.'''
from __future__ import with_statement
import sys
from optparse import OptionParser
import rsa.key
def private_to_public():
'''Reads a private key and outputs the corresponding public key.'''
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options]',
description='Reads a private key and outputs the '
'corresponding public key. Both private and public keys use '
'the format described in PKCS#1 v1.5')
parser.add_option('-i', '--input', dest='infilename', type='string',
help='Input filename. Reads from stdin if not specified')
parser.add_option('-o', '--output', dest='outfilename', type='string',
                      help='Output filename. Writes to stdout if not specified')
parser.add_option('--inform', dest='inform',
help='key format of input - default PEM',
choices=('PEM', 'DER'), default='PEM')
parser.add_option('--outform', dest='outform',
help='key format of output - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv)
# Read the input data
if cli.infilename:
print >>sys.stderr, 'Reading private key from %s in %s format' % \
(cli.infilename, cli.inform)
with open(cli.infilename) as infile:
in_data = infile.read()
else:
print >>sys.stderr, 'Reading private key from stdin in %s format' % \
cli.inform
in_data = sys.stdin.read()
# Take the public fields and create a public key
priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
# Save to the output file
out_data = pub_key.save_pkcs1(cli.outform)
if cli.outfilename:
print >>sys.stderr, 'Writing public key to %s in %s format' % \
(cli.outfilename, cli.outform)
with open(cli.outfilename, 'w') as outfile:
outfile.write(out_data)
else:
print >>sys.stderr, 'Writing public key to stdout in %s format' % \
cli.outform
sys.stdout.write(out_data)
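if __name__ == '__main__':
    # Minimal invocation sketch: the upstream rsa package wires this
    # function up as a console script; running the module directly like
    # this is an illustrative assumption, not part of the original file.
    private_to_public()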
|
gpl-3.0
|
adblockplus/gyp
|
test/win/gyptest-link-fixed-base.py
|
344
|
1099
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure fixed base setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('fixed-base.gyp', chdir=CHDIR)
test.build('fixed-base.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
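  # With /FIXED the linker omits the .reloc section, so dumpbin /headers
  # reports 'Relocations stripped' among the file characteristics; the
  # checks below key off that string.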
  # For an exe the default is fixed; for a dll, it is not.
if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'):
test.fail_test()
if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'):
test.fail_test()
# Explicitly not fixed.
if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'):
test.fail_test()
# Explicitly fixed.
if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'):
test.fail_test()
test.pass_test()
|
bsd-3-clause
|
vFense/vFenseAgent-nix
|
agent/deps/rpm-32/Python-2.7.5/lib/python2.7/distutils/tests/test_build.py
|
141
|
1924
|
"""Tests for distutils.command.build."""
import unittest
import os
import sys
from test.test_support import run_unittest
from distutils.command.build import build
from distutils.tests import support
from sysconfig import get_platform
class BuildTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = build(dist)
cmd.finalize_options()
# if not specified, plat_name gets the current platform
self.assertEqual(cmd.plat_name, get_platform())
# build_purelib is build + lib
wanted = os.path.join(cmd.build_base, 'lib')
self.assertEqual(cmd.build_purelib, wanted)
# build_platlib is 'build/lib.platform-x.x[-pydebug]'
# examples:
# build/lib.macosx-10.3-i386-2.7
plat_spec = '.%s-%s' % (cmd.plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
self.assertTrue(cmd.build_platlib.endswith('-pydebug'))
plat_spec += '-pydebug'
wanted = os.path.join(cmd.build_base, 'lib' + plat_spec)
self.assertEqual(cmd.build_platlib, wanted)
# by default, build_lib = build_purelib
self.assertEqual(cmd.build_lib, cmd.build_purelib)
# build_temp is build/temp.<plat>
wanted = os.path.join(cmd.build_base, 'temp' + plat_spec)
self.assertEqual(cmd.build_temp, wanted)
# build_scripts is build/scripts-x.x
wanted = os.path.join(cmd.build_base, 'scripts-' + sys.version[0:3])
self.assertEqual(cmd.build_scripts, wanted)
# executable is os.path.normpath(sys.executable)
self.assertEqual(cmd.executable, os.path.normpath(sys.executable))
def test_suite():
return unittest.makeSuite(BuildTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
lgpl-3.0
|
yuvipanda/kubernetes
|
cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py
|
145
|
4106
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
from path import Path
import pytest
import sys
# Munge the python path so we can find our hook code
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
# Import the modules from the hook
import install
class TestInstallHook():
@patch('install.path')
def test_update_rc_files(self, pmock):
"""
Test happy path on updating env files. Assuming everything
exists and is in place.
"""
pmock.return_value.lines.return_value = ['line1', 'line2']
install.update_rc_files(['test1', 'test2'])
pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
'test1', 'test2'])
def test_update_rc_files_with_nonexistent_path(self):
"""
        Test the unhappy path where the rc files do not exist.
"""
with pytest.raises(OSError) as exinfo:
install.update_rc_files(['test1','test2'])
@patch('install.fetch')
@patch('install.hookenv')
def test_package_installation(self, hemock, ftmock):
"""
Verify we are calling the known essentials to build and syndicate
kubes.
"""
pkgs = ['build-essential', 'git',
'make', 'nginx', 'python-pip']
install.install_packages()
hemock.log.assert_called_with('Installing Debian packages')
ftmock.filter_installed_packages.assert_called_with(pkgs)
@patch('install.archiveurl.ArchiveUrlFetchHandler')
def test_go_download(self, aumock):
"""
Test that we are actually handing off to charm-helpers to
        download a specific archive of Go. This is non-configurable, so
        it's reasonably safe to assume we're always going to do this,
        and when it changes we shall curse the brittleness of this test.
"""
ins_mock = aumock.return_value.install
install.download_go()
url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
sha1='5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
ins_mock.assert_called_with(url, '/usr/local', sha1, 'sha1')
@patch('install.subprocess')
def test_clone_repository(self, spmock):
"""
We're not using a unit-tested git library - so ensure our subprocess
call is consistent. If we change this, we want to know we've broken it.
"""
install.clone_repository()
repo = 'https://github.com/kubernetes/kubernetes.git'
direct = '/opt/kubernetes'
spmock.check_output.assert_called_with(['git', 'clone', repo, direct])
@patch('install.install_packages')
@patch('install.download_go')
@patch('install.clone_repository')
@patch('install.update_rc_files')
@patch('install.hookenv')
def test_install_main(self, hemock, urmock, crmock, dgmock, ipmock):
"""
Ensure the driver/main method is calling all the supporting methods.
"""
strings = [
'export GOROOT=/usr/local/go\n',
'export PATH=$PATH:$GOROOT/bin\n',
'export KUBE_MASTER_IP=0.0.0.0\n',
'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
]
install.install()
crmock.assert_called_once()
dgmock.assert_called_once()
        ipmock.assert_called_once()
urmock.assert_called_with(strings)
hemock.open_port.assert_called_with(8080)
|
apache-2.0
|
mitsuhiko/zine
|
zine/feeds.py
|
1
|
19421
|
"""
Syndication feed generation library -- used for generating RSS, etc.
Implementation lifted from Django and modified to remove Django
dependencies.
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
import types
import datetime
import urllib
import urlparse
from decimal import Decimal
from xml.sax.saxutils import XMLGenerator
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
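# e.g. smart_str(u'café') -> 'caf\xc3\xa9' (UTF-8 bytes), while
# smart_str(None, strings_only=True) passes None through unchanged.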
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and is_protected_type(s):
return s
try:
        if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError, e:
if not isinstance(s, Exception):
raise
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
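# e.g. iri_to_uri(u'/ümlaut?q=a b') -> '/%C3%BCmlaut?q=a%20b': the space is
# percent-encoded while the reserved '?' and '=' are preserved.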
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None: attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
def rfc2822_date(date):
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
if date.tzinfo:
time_str = date.strftime('%a, %d %b %Y %H:%M:%S ')
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds / 60)
hour, minute = divmod(timezone, 60)
return time_str + "%+03d%02d" % (hour, minute)
else:
return date.strftime('%a, %d %b %Y %H:%M:%S -0000')
def rfc3339_date(date):
if date.tzinfo:
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds / 60)
hour, minute = divmod(timezone, 60)
return time_str + "%+03d:%02d" % (hour, minute)
else:
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
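# e.g. rfc3339_date(datetime.datetime(2012, 2, 3, 11, 11, 11)) returns
# '2012-02-03T11:11:11Z' for a naive (timezone-less) datetime.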
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
url_split = urlparse.urlparse(url)
# Python 2.4 didn't have named attributes on split results or the hostname.
hostname = getattr(url_split, 'hostname', url_split[1].split(':')[0])
path = url_split[2]
fragment = url_split[5]
d = ''
if date is not None:
d = ',%s' % date.strftime('%Y-%m-%d')
return u'tag:%s%s:%s/%s' % (hostname, d, path, fragment)
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_unicode(s, strings_only=True)
if categories:
categories = [force_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_unicode(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_unicode(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_unicode(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
from StringIO import StringIO
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()


class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
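

# Per the docstring above, the length is passed as text rather than an int,
# since it is written directly into an XML attribute. A minimal sketch:
#
#     >>> enc = Enclosure(u'http://example.com/a.mp3', u'123456', u'audio/mpeg')
#     >>> enc.url
#     'http://example.com/a.mp3'
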
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", self.rss_attributes())
handler.startElement(u"channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def rss_attributes(self):
return {u"version": self._version,
u"xmlns:atom": u"http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement(u'item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"item")
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
handler.addQuickElement(u"atom:link", None, {u"rel": u"self", u"href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
handler.addQuickElement(u"lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8'))
if self.feed['ttl'] is not None:
handler.addQuickElement(u"ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement(u"channel")


class RssUserland091Feed(RssFeed):
_version = u"0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])


class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(u"dc:creator", item["author_name"], {u"xmlns:dc": u"http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('utf-8'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement(u"ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)


class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u'feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement(u"feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {u"xmlns": self.ns, u"xml:lang": self.feed['language']}
else:
return {u"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['id'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('utf-8'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"entry")
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('utf-8'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])


# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
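
# An end-to-end usage sketch (assumed, not part of the original module); the
# names and URLs below are illustrative. feed_url is supplied so the channel
# can carry its atom:link self reference.
#
#     import datetime
#     feed = DefaultFeed(
#         title=u"Example feed",
#         link=u"http://example.com/",
#         description=u"Recent items",
#         feed_url=u"http://example.com/feed/",
#     )
#     feed.add_item(
#         title=u"First item",
#         link=u"http://example.com/items/1/",
#         description=u"Body text",
#         pubdate=datetime.datetime(2008, 4, 1, 12, 0),
#     )
#     xml = feed.writeString('utf-8')  # swap in Atom1Feed for Atom output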
|
bsd-3-clause
|