repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
BMJHayward/django | django/utils/translation/trans_null.py | 467 | 1408 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
def ngettext(singular, plural, number):
if number == 1:
return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
return message
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request):
return None
| bsd-3-clause |
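The `to_locale()` helper above is the only non-trivial logic in this stub module; here is a minimal standalone sketch of the same transformation (no Django required, test inputs are illustrative):

```python
# Standalone sketch of the to_locale() logic above (no Django needed).
def to_locale(language):
    p = language.find('-')
    if p >= 0:
        return language[:p].lower() + '_' + language[p + 1:].upper()
    return language.lower()

assert to_locale('pt-br') == 'pt_BR'
assert to_locale('de') == 'de'
```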
cloudera/hue | desktop/core/ext-py/boto-2.46.1/boto/services/result.py | 153 | 5596 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') >= 0:  # quote any value containing a comma, even at position 0
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
file_name = os.path.join(path, key)
print('retrieving file: %s to %s' % (key, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file, delete_msg)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
| apache-2.0 |
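A hedged usage sketch for the class above; the `ServiceDef` import path and config filename are assumptions based on boto 2.x's services framework:

```python
# Hypothetical driver for ResultProcessor (boto 2.x services API assumed).
from boto.services.servicedef import ServiceDef  # assumed import path

sd = ServiceDef('my_service.cfg')         # hypothetical service config file
proc = ResultProcessor('batch-001', sd)   # batch name is illustrative
proc.get_results('/tmp/results', get_file=True, delete_msg=True)
```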
ArianaGashi/Techstitution | venv/lib/python2.7/site-packages/jinja2/debug.py | 335 | 11553 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, PY2
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# what does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch errors
# down and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# re-concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in iteritems(real_locals):
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
if PY2:
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else:
code = CodeType(0, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non-CPython
interpreters.
"""
import ctypes
from types import TracebackType
if PY2:
# figure out size of _Py_ssize_t for Python 2:
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
else:
# platform ssize_t on Python 3
_Py_ssize_t = ctypes.c_ssize_t
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
| cc0-1.0 |
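A sketch of how this module is typically driven; the `template` object here is a hypothetical `jinja2.Template` instance, everything else follows the functions above:

```python
# Sketch: rewriting a template traceback with the helpers above.
import sys

try:
    template.render()                  # hypothetical jinja2.Template instance
except Exception:
    processed = make_traceback(sys.exc_info())
    print(processed.render_as_text())  # traceback with template line numbers
```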
Wuteyan/VTK | Examples/Modelling/Python/constrainedDelaunay.py | 15 | 4503 | #!/usr/bin/env python
# This example demonstrates how to use a constraint polygon in
# Delaunay triangulation.
import vtk
from vtk.util.colors import peacock
# Generate the input points and constrained edges/polygons.
points = vtk.vtkPoints()
points.InsertPoint(0, 1, 4, 0)
points.InsertPoint(1, 3, 4, 0)
points.InsertPoint(2, 7, 4, 0)
points.InsertPoint(3, 11, 4, 0)
points.InsertPoint(4, 13, 4, 0)
points.InsertPoint(5, 13, 8, 0)
points.InsertPoint(6, 13, 12, 0)
points.InsertPoint(7, 10, 12, 0)
points.InsertPoint(8, 7, 12, 0)
points.InsertPoint(9, 4, 12, 0)
points.InsertPoint(10, 1, 12, 0)
points.InsertPoint(11, 1, 8, 0)
points.InsertPoint(12, 3.5, 5, 0)
points.InsertPoint(13, 4.5, 5, 0)
points.InsertPoint(14, 5.5, 8, 0)
points.InsertPoint(15, 6.5, 8, 0)
points.InsertPoint(16, 6.5, 5, 0)
points.InsertPoint(17, 7.5, 5, 0)
points.InsertPoint(18, 7.5, 8, 0)
points.InsertPoint(19, 9, 8, 0)
points.InsertPoint(20, 9, 5, 0)
points.InsertPoint(21, 10, 5, 0)
points.InsertPoint(22, 10, 7, 0)
points.InsertPoint(23, 11, 5, 0)
points.InsertPoint(24, 12, 5, 0)
points.InsertPoint(25, 10.5, 8, 0)
points.InsertPoint(26, 12, 11, 0)
points.InsertPoint(27, 11, 11, 0)
points.InsertPoint(28, 10, 9, 0)
points.InsertPoint(29, 10, 11, 0)
points.InsertPoint(30, 9, 11, 0)
points.InsertPoint(31, 9, 9, 0)
points.InsertPoint(32, 7.5, 9, 0)
points.InsertPoint(33, 7.5, 11, 0)
points.InsertPoint(34, 6.5, 11, 0)
points.InsertPoint(35, 6.5, 9, 0)
points.InsertPoint(36, 5, 9, 0)
points.InsertPoint(37, 4, 6, 0)
points.InsertPoint(38, 3, 9, 0)
points.InsertPoint(39, 2, 9, 0)
polys = vtk.vtkCellArray()
polys.InsertNextCell(12)
polys.InsertCellPoint(0)
polys.InsertCellPoint(1)
polys.InsertCellPoint(2)
polys.InsertCellPoint(3)
polys.InsertCellPoint(4)
polys.InsertCellPoint(5)
polys.InsertCellPoint(6)
polys.InsertCellPoint(7)
polys.InsertCellPoint(8)
polys.InsertCellPoint(9)
polys.InsertCellPoint(10)
polys.InsertCellPoint(11)
polys.InsertNextCell(28)
polys.InsertCellPoint(39)
polys.InsertCellPoint(38)
polys.InsertCellPoint(37)
polys.InsertCellPoint(36)
polys.InsertCellPoint(35)
polys.InsertCellPoint(34)
polys.InsertCellPoint(33)
polys.InsertCellPoint(32)
polys.InsertCellPoint(31)
polys.InsertCellPoint(30)
polys.InsertCellPoint(29)
polys.InsertCellPoint(28)
polys.InsertCellPoint(27)
polys.InsertCellPoint(26)
polys.InsertCellPoint(25)
polys.InsertCellPoint(24)
polys.InsertCellPoint(23)
polys.InsertCellPoint(22)
polys.InsertCellPoint(21)
polys.InsertCellPoint(20)
polys.InsertCellPoint(19)
polys.InsertCellPoint(18)
polys.InsertCellPoint(17)
polys.InsertCellPoint(16)
polys.InsertCellPoint(15)
polys.InsertCellPoint(14)
polys.InsertCellPoint(13)
polys.InsertCellPoint(12)
polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetPolys(polys)
# Notice this trick. The SetInput() method accepts a vtkPolyData that
# is also the input to the Delaunay filter. The points of the
# vtkPolyData are used to generate the triangulation; the polygons are
# used to create a constraint region. The polygons are very carefully
# created and ordered in the right direction to indicate inside and
# outside of the polygon.
delny = vtk.vtkDelaunay2D()
delny.SetInput(polyData)
delny.SetSource(polyData)
mapMesh = vtk.vtkPolyDataMapper()
mapMesh.SetInputConnection(delny.GetOutputPort())
meshActor = vtk.vtkActor()
meshActor.SetMapper(mapMesh)
# Now we just pretty the mesh up with tubed edges and balls at the
# vertices.
extract = vtk.vtkExtractEdges()
extract.SetInputConnection(delny.GetOutputPort())
tubes = vtk.vtkTubeFilter()
tubes.SetInputConnection(extract.GetOutputPort())
tubes.SetRadius(0.1)
tubes.SetNumberOfSides(6)
mapEdges = vtk.vtkPolyDataMapper()
mapEdges.SetInputConnection(tubes.GetOutputPort())
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(mapEdges)
edgeActor.GetProperty().SetColor(peacock)
edgeActor.GetProperty().SetSpecularColor(1, 1, 1)
edgeActor.GetProperty().SetSpecular(0.3)
edgeActor.GetProperty().SetSpecularPower(20)
edgeActor.GetProperty().SetAmbient(0.2)
edgeActor.GetProperty().SetDiffuse(0.8)
# Create the rendering window, renderer, and interactive renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(meshActor)
ren.AddActor(edgeActor)
ren.SetBackground(0, 0, 0)
renWin.SetSize(450, 300)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(2)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
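Note that `SetInput()`/`SetSource()` belong to the pre-VTK-6 pipeline API; assuming only the API rename, the VTK 6+ equivalent of those two calls would be:

```python
# VTK >= 6 equivalent of the SetInput()/SetSource() calls above.
delny = vtk.vtkDelaunay2D()
delny.SetInputData(polyData)    # points used to generate the triangulation
delny.SetSourceData(polyData)   # polygons used as the constraint region
```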
GitAngel/django | tests/field_subclassing/fields.py | 35 | 2704 | from __future__ import unicode_literals
import json
import warnings
from django.db import models
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
# Catch warning about subfieldbase -- remove in Django 1.10
warnings.filterwarnings(
'ignore',
'SubfieldBase has been deprecated. Use Field.from_db_value instead.',
RemovedInDjango110Warning
)
@python_2_unicode_compatible
class Small(object):
"""
A simple class to show that non-trivial Python objects can be used as
attributes.
"""
def __init__(self, first, second):
self.first, self.second = first, second
def __str__(self):
return '%s%s' % (force_text(self.first), force_text(self.second))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.first == other.first and self.second == other.second
return False
class SmallField(six.with_metaclass(models.SubfieldBase, models.Field)):
"""
Turns the "Small" class into a Django field. Because of the similarities
with normal character fields and the fact that Small.__unicode__ does
something sensible, we don't need to implement a lot here.
"""
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 2
super(SmallField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if isinstance(value, Small):
return value
return Small(value[0], value[1])
def get_db_prep_save(self, value, connection):
return six.text_type(value)
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'exact':
return force_text(value)
if lookup_type == 'in':
return [force_text(v) for v in value]
if lookup_type == 'isnull':
return []
raise TypeError('Invalid lookup type: %r' % lookup_type)
class SmallerField(SmallField):
pass
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
description = ("JSONField automatically serializes and deserializes values to "
"and from JSON.")
def to_python(self, value):
if not value:
return None
if isinstance(value, six.string_types):
value = json.loads(value)
return value
def get_db_prep_save(self, value, connection):
if value is None:
return None
return json.dumps(value)
class CustomTypedField(models.TextField):
def db_type(self, connection):
return 'custom_field'
| bsd-3-clause |
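A minimal model sketch pairing with the custom fields above; the field name and verbose name follow the test file later in this dump, but the model bodies themselves are assumptions:

```python
# Hypothetical models.py using the custom fields defined above.
from django.db import models

from .fields import JSONField, SmallField


class MyModel(models.Model):
    name = models.CharField(max_length=10)
    data = SmallField('small field')   # verbose_name matches the tests

class DataModel(models.Model):
    data = JSONField(null=True)
```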
slozier/ironpython2 | Src/StdLib/Lib/test/test_wait3.py | 136 | 1062 | """This test checks for correct wait3() behavior.
"""
import os
import time
import unittest
from test.fork_wait import ForkWait
from test.test_support import run_unittest, reap_children
try:
os.fork
except AttributeError:
raise unittest.SkipTest, "os.fork not defined -- skipping test_wait3"
try:
os.wait3
except AttributeError:
raise unittest.SkipTest, "os.wait3 not defined -- skipping test_wait3"
class Wait3Test(ForkWait):
def wait_impl(self, cpid):
for i in range(10):
# wait3() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status, rusage = os.wait3(os.WNOHANG)
if spid == cpid:
break
time.sleep(1.0)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
self.assertTrue(rusage)
def test_main():
run_unittest(Wait3Test)
reap_children()
if __name__ == "__main__":
test_main()
| apache-2.0 |
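A standalone sketch of the `wait3()` polling pattern the test exercises (POSIX-only; the sleep interval is illustrative):

```python
# Minimal POSIX-only sketch of the wait3() polling pattern above.
import os
import time

pid = os.fork()
if pid == 0:
    os._exit(0)                  # child exits immediately
while True:
    spid, status, rusage = os.wait3(os.WNOHANG)
    if spid == pid:
        break                    # child reaped
    time.sleep(0.1)
assert status == 0
```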
nhippenmeyer/django | tests/field_subclassing/tests.py | 214 | 4475 | from __future__ import unicode_literals
import inspect
from django.core import exceptions, serializers
from django.db import connection
from django.test import SimpleTestCase, TestCase
from .fields import CustomTypedField, Small
from .models import ChoicesModel, DataModel, MyModel, OtherModel
class CustomField(TestCase):
def test_refresh(self):
d = DataModel.objects.create(data=[1, 2, 3])
d.refresh_from_db(fields=['data'])
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
def test_defer(self):
d = DataModel.objects.create(data=[1, 2, 3])
self.assertIsInstance(d.data, list)
d = DataModel.objects.get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
d = DataModel.objects.defer("data").get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
# Refetch for save
d = DataModel.objects.defer("data").get(pk=d.pk)
d.save()
d = DataModel.objects.get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
def test_custom_field(self):
# Creating a model with custom fields is done as per normal.
s = Small(1, 2)
self.assertEqual(str(s), "12")
m = MyModel.objects.create(name="m", data=s)
# Custom fields still have normal field's attributes.
self.assertEqual(m._meta.get_field("data").verbose_name, "small field")
# The m.data attribute has been initialized correctly. It's a Small
# object.
self.assertEqual((m.data.first, m.data.second), (1, 2))
# The data loads back from the database correctly and 'data' has the
# right type.
m1 = MyModel.objects.get(pk=m.pk)
self.assertIsInstance(m1.data, Small)
self.assertEqual(str(m1.data), "12")
# We can do normal filtering on the custom field (and will get an error
# when we use a lookup type that does not make sense).
s1 = Small(1, 3)
s2 = Small("a", "b")
self.assertQuerysetEqual(
MyModel.objects.filter(data__in=[s, s1, s2]), [
"m",
],
lambda m: m.name,
)
self.assertRaises(TypeError, lambda: MyModel.objects.filter(data__lt=s))
# Serialization works, too.
stream = serializers.serialize("json", MyModel.objects.all())
self.assertJSONEqual(stream, [{
"pk": m1.pk,
"model": "field_subclassing.mymodel",
"fields": {"data": "12", "name": "m"}
}])
obj = list(serializers.deserialize("json", stream))[0]
self.assertEqual(obj.object, m)
# Test retrieving custom field data
m.delete()
m1 = MyModel.objects.create(name="1", data=Small(1, 2))
MyModel.objects.create(name="2", data=Small(2, 3))
self.assertQuerysetEqual(
MyModel.objects.all(), [
"12",
"23",
],
lambda m: str(m.data),
ordered=False
)
def test_field_subclassing(self):
o = OtherModel.objects.create(data=Small("a", "b"))
o = OtherModel.objects.get()
self.assertEqual(o.data.first, "a")
self.assertEqual(o.data.second, "b")
def test_subfieldbase_plays_nice_with_module_inspect(self):
"""
Custom fields should play nice with python standard module inspect.
http://users.rcn.com/python/download/Descriptor.htm#properties
"""
# Even when looking for totally different properties, SubfieldBase's
# non property like behavior made inspect crash. Refs #12568.
data = dict(inspect.getmembers(MyModel))
self.assertIn('__module__', data)
self.assertEqual(data['__module__'], 'field_subclassing.models')
def test_validation_of_choices_for_custom_field(self):
# a valid choice
o = ChoicesModel.objects.create(data=Small('a', 'b'))
o.full_clean()
# an invalid choice
o = ChoicesModel.objects.create(data=Small('d', 'e'))
with self.assertRaises(exceptions.ValidationError):
o.full_clean()
class TestDbType(SimpleTestCase):
def test_db_parameters_respects_db_type(self):
f = CustomTypedField()
self.assertEqual(f.db_parameters(connection)['type'], 'custom_field')
| bsd-3-clause |
theo-l/django | django/views/static.py | 6 | 4553 | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import posixpath
import re
from pathlib import Path
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils._os import safe_join
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
path('<path:path>', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(path).lstrip('/')
fullpath = Path(safe_join(document_root, path))
if fullpath.is_dir():
if show_indexes:
return directory_index(path, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not fullpath.exists():
raise Http404(_('“%(path)s” does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = fullpath.stat()
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(str(fullpath))
content_type = content_type or 'application/octet-stream'
response = FileResponse(fullpath.open('rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Language" content="en-us">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</title>
</head>
<body>
<h1>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
c = Context()
else:
c = {}
files = []
for f in fullpath.iterdir():
if not f.name.startswith('.'):
url = str(f.relative_to(fullpath))
if f.is_dir():
url += '/'
files.append(url)
c.update({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches[1])
header_len = matches[3]
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
| bsd-3-clause |
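The docstring's URL pattern, expanded into a complete development-only `urls.py` (the document root and prefix are illustrative):

```python
# Development-only urls.py wiring serve(), per the docstring above.
from django.urls import path
from django.views.static import serve

urlpatterns = [
    path('media/<path:path>', serve, {
        'document_root': '/path/to/my/files/',   # illustrative path
        'show_indexes': True,
    }),
]
```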
ajvpot/CTFd | migrations/versions/75e8ab9a0014_add_fields_and_fieldentries_tables.py | 4 | 1867 | """Add Fields and FieldEntries tables
Revision ID: 75e8ab9a0014
Revises: 0366ba6575ca
Create Date: 2020-08-19 00:36:17.579497
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "75e8ab9a0014"
down_revision = "0366ba6575ca"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"fields",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=True),
sa.Column("type", sa.String(length=80), nullable=True),
sa.Column("field_type", sa.String(length=80), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("required", sa.Boolean(), nullable=True),
sa.Column("public", sa.Boolean(), nullable=True),
sa.Column("editable", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"field_entries",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("type", sa.String(length=80), nullable=True),
sa.Column("value", sa.JSON(), nullable=True),
sa.Column("field_id", sa.Integer(), nullable=True),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("team_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["field_id"], ["fields.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["team_id"], ["teams.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("field_entries")
op.drop_table("fields")
# ### end Alembic commands ###
| apache-2.0 |
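Migrations like this one are normally applied via Alembic; a hedged sketch of the programmatic invocation, assuming a standard `alembic.ini` exists:

```python
# Hypothetical programmatic upgrade to the revision defined above.
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')            # config path assumed
command.upgrade(cfg, '75e8ab9a0014')   # revision id from this migration
```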
WorldViews/Spirals | KinPy/KinOSCWatcher.py | 2 | 5277 |
import os, socket, time
import threading
#import MessageBoard
import traceback
import OSC
OSC_SERVER = None
ALL_JOINTS = {
'HandRight': 'RIGHT_HAND',
'HandLeft': 'LEFT_HAND',
'WristRight': 'RIGHT_WRIST',
'WristLeft': 'LEFT_WRIST',
'ElbowRight': 'RIGHT_ELBOW',
'ElbowLeft': 'LEFT_ELBOW',
'ShoulderRight': 'RIGHT_SHOULDER',
'ShoulderLeft': 'LEFT_SHOULDER',
'Neck': 'NECK',
'Head': 'HEAD',
'SpineMid': 'MID_SPINE',
'SpineBase': 'BASE_SPINE',
'HipRight': 'RIGHT_HIP',
'HipLeft': 'LEFT_HIP',
'KneeRight': 'RIGHT_KNEE',
'KneeLeft': 'LEFT_KNEE',
'AnkleRight': 'RIGHT_ANKLE',
'AnkleLeft': 'LEFT_ANKLE',
'FootRight': 'RIGHT_FOOT',
'FootLeft': 'LEFT_FOOT'
}
JOINTS = {
'HandRight': 'RIGHT_HAND',
'HandLeft': 'LEFT_HAND',
'ElbowRight': 'RIGHT_ELBOW',
'ElbowLeft': 'LEFT_ELBOW',
'Head': 'HEAD'
}
KINECT_CONTROL = None
"""
This is a simple class for holding the message associated
with a body, and some other information such as body number
or timing.
"""
class Body:
numBodies = 0
bodyById = {}
@staticmethod
def getBody(bodyId):
if bodyId in Body.bodyById:
return Body.bodyById[bodyId]
# MyOSCHandler.numPeople += 1
# personNum = MyOSCHandler.numPeople
body = Body(bodyId)
Body.bodyById[bodyId] = body
return body
def __init__(self, id):
Body.numBodies += 1
self.bodyId = id
self.personNum = Body.numBodies
self.msg = None
def setJoint(self, joint, xyz, trackState):
"""
This gets called with a joint position and accumulates
the joint information in a message. When this gets called
with a joint that is already in the message, it is assumed
the message is "complete" (i.e. has a complete set of
the joints being watched) and a single message is sent
with all those joints.
"""
global OSC_SERVER
#print "buildMessage", bodyId, joint, xyz
if JOINTS != None:
jname = JOINTS[joint]
else:
jname = joint
msg = self.msg
if msg != None and jname in msg:
#print "sending message!!!!", msg
if OSC_SERVER.kinSkelHandler:
OSC_SERVER.kinSkelHandler(msg)
msg = None
if msg == None:
msg = {'msgType':'kinect.skeleton.pose',
'personNum': self.personNum}
msg[jname] = xyz
c = .2
if trackState == 'Tracked':
c = 1.0
msg["%s_c" % jname] = c
self.msg = msg
class MyOSCHandler(OSC.OSCRequestHandler):
def dispatchMessage(self, pattern, tags, data):
parts = pattern.split("/")
if len(parts) != 5:
print "Unexpected number of parts"
return []
bodyId = parts[2]
if parts[3] == "hands":
if tags != "ss":
print "Unexpected format", tags
print "pattern:", pattern
return []
elif parts[3] == "joints":
joint = parts[4]
if tags != "fffs":
print "Unexpected format", tags
print "pattern:", pattern
return []
if JOINTS and joint not in JOINTS:
return []
#print "data: %s\n" % (data,)
x,y,z,trackState = data
pos = 1000.0*x, 1000.0*y, 1000.0*z
body = Body.getBody(bodyId)
body.setJoint(joint, pos, trackState)
else:
print "Unexpected pattern", pattern
return []
if self.server.kinJointHandler:
body = Body.getBody(bodyId)
msg = {'msgType': 'joint', 'personNum': body.personNum, 'joint': joint,
'bodyId': bodyId, 'pos': [x,y,z]}
self.server.kinJointHandler(msg)
# if SERVER:
# SERVER.sendMessageToAllClients(msg)
return []
#class MyOSCServer(OSC.ThreadingOSCServer):
class MyOSCServer(OSC.OSCServer):
RequestHandlerClass = MyOSCHandler
def bodyMsgHandler(msg):
print msg
OSC_HOST_ADDR = None
OSC_PORT = 12345
def getOSC_ADDR():
global OSC_HOST_ADDR
if not OSC_HOST_ADDR:
host = socket.gethostname()
OSC_HOST_ADDR = socket.gethostbyname(host)
"""
path = "%s.OSC_PARAMS.json"
if os.path.exists(path):
try:
params = json.load(file(path))
return tuple(params['OSC_ADDR'])
except:
traceback.print_exc()
return OSC_ADDR
"""
return OSC_HOST_ADDR, OSC_PORT
def startOSC(kinSkelHandler=None, kinJointHandler=None):
global OSC_SERVER
addr = getOSC_ADDR()
print "Using addr:", addr
s = MyOSCServer(addr)
OSC_SERVER = s
s.kinSkelHandler = kinSkelHandler
s.kinJointHandler = kinJointHandler
#s.app = app
s.addMsgHandler("/bodies", bodyMsgHandler)
t = threading.Thread(target=s.serve_forever)
t.start()
#t.setDaemon(True)
#s.serve_forever()
def kinSkelHandler(msg):
if 0:
print msg
def kinJointHandler(msg):
if 0:
print msg
def run(setupServer=True):
startOSC(kinSkelHandler, kinJointHandler)
while 1:
time.sleep(1)
if __name__ == '__main__':
run()
| mit |
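A hypothetical test client for the watcher above, using the same pyOSC package it imports; the address pattern and type tags (`fffs`) follow `dispatchMessage`:

```python
# Hypothetical sender matching the /bodies/<id>/joints/<joint> fffs pattern.
import OSC

client = OSC.OSCClient()
client.connect(('127.0.0.1', 12345))   # OSC_PORT from the module above
msg = OSC.OSCMessage("/bodies/body1/joints/Head")
msg.append(0.1)        # x
msg.append(0.2)        # y
msg.append(0.3)        # z
msg.append("Tracked")  # tracking state
client.send(msg)
```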
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/heapq.py | 14 | 23017 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush."""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
def merge(*iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), order * direction, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, next = s = h[0]
yield value
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, next = h[0]
yield value
yield from next.__self__
return
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
value = next()
h_append([key(value), order * direction, value, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, next = s = h[0]
yield value
value = next()
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, next = h[0]
yield value
yield from next.__self__
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reversed sorted so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See the more detailed comparison of approach at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top, _order = result[0]
order += 1
result.sort()
return [elem for (elem, order) in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top, _order, _elem = result[0]
order += 1
result.sort()
return [elem for (k, order, elem) in result]
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top, _order = result[0]
order -= 1
result.sort(reverse=True)
return [elem for (elem, order) in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top, _order, _elem = result[0]
order -= 1
result.sort(reverse=True)
return [elem for (k, order, elem) in result]
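# Illustrative usage of the pure-Python implementations above (not part of
# the original module; values chosen so the expected output is unambiguous):
#
#     >>> nsmallest(3, [5, 1, 4, 2, 3])
#     [1, 2, 3]
#     >>> nlargest(2, [('a', 3), ('b', 1), ('c', 2)], key=lambda t: t[1])
#     [('a', 3), ('c', 2)]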
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
try:
from _heapq import _heapreplace_max
except ImportError:
pass
try:
from _heapq import _heapify_max
except ImportError:
pass
try:
from _heapq import _heappop_max
except ImportError:
pass
if __name__ == "__main__":
import doctest
print(doctest.testmod())
| apache-2.0 |
xlqian/navitia | release/script_release.py | 1 | 16196 | # -*- coding: utf-8 -*-
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
import os
os.environ['LC_ALL'] = 'en_US'
os.environ['GIT_PYTHON_TRACE'] = '1' # can be 0 (no trace), 1 (git commands) or full (git commands + git output)
from git import *
from datetime import datetime
import subprocess
import re
from sys import exit, argv
from shutil import copyfile
from os import remove, stat
import codecs
import requests
import logging
def get_tag_name(version):
return "v{maj}.{min}.{hf}".format(maj=version[0], min=version[1], hf=version[2])
class ReleaseManager:
def __init__(self, release_type, remote_name="canalTP"):
self.directory = ".."
self.changelog_filename = self.directory + "/debian/changelog"
self.data_version_filename = self.directory + "/source/type/data.cpp"
self.release_type = release_type
self.repo = Repo(self.directory)
self.git = self.repo.git
# we fetch latest version from remote
self.remote_name = remote_name
print("fetching from {}...".format(remote_name))
self.repo.remote(remote_name).fetch("--tags")
# and we update dev and release branches
print("rebasing dev and release...")
# TODO quit on error
self.git.rebase(remote_name + "/dev", "dev")
self.dev_data_version = self.get_data_version()
remote_release = remote_name + "/release"
try:
self.git.checkout("-B release ", remote_release)
except Exception as e:
print("Cannot checkout 'release':{}, creating from distant branch".format(str(e)))
self.git.checkout("-b", "release", remote_release)
print("checking that release was merged into dev...")
unmerged = self.git.branch("--no-merged", "dev", '--no-color')
is_release_unmerged = re.search(" release(\n|$)", unmerged)
if is_release_unmerged:
print(is_release_unmerged.group(0))
print("ABORTING: {rem}/release branch was not merged in {rem}/dev".format(rem=remote_name))
print("This is required before releasing. You may use (be careful):")
print("git checkout dev; git submodule update --recursive")
print("git merge release")
exit(1)
print("current branch: {}".format(self.repo.active_branch))
self.version = None
self.str_version = ""
self.latest_tag = ""
# if API rate limit exceeded use, get 'personal access token' on github then provide:
# self.auth = ('user', 'pass')
self.auth = None
def get_data_version(self):
f_data_version = codecs.open(self.data_version_filename, 'r', 'utf-8')
version = None
for line in f_data_version:
res = re.search('^ *const .*data_version *= *([0-9]+) *;.*$', line)
if res:
version = res.group(1)
break
if version is None:
print("ABORTING: data_version could not be retrieved from {f}".format(f=self.data_version_filename))
exit(1)
print("Current data_version is " + version)
try:
return int(version)
except ValueError:
print("ABORTING: data_version {d} is not an Integer".format(d=version))
exit(1)
def get_new_version_number(self):
latest_version = None
last_tag = self.git.describe('--tags', abbrev=0)
version = re.search('.*(\d+\.\d+\.\d+).*', last_tag)
if version:
latest_version = version.group(1)
if not latest_version:
print("no latest version found")
exit(1)
version_n = latest_version.split('.')
print("latest version is {}".format(version_n))
self.version = [int(i) for i in version_n]
self.latest_tag = get_tag_name(self.version)
print("last tag is " + self.latest_tag)
if self.release_type == "regular":
if self.version[0] > self.dev_data_version:
print(
"ABORTING: data_version {d} is < to latest tag {t}".format(
d=self.dev_data_version, t=self.latest_tag
)
)
exit(1)
elif self.version[0] < self.dev_data_version: # major version
self.version[0] = self.dev_data_version
self.version[1] = self.version[2] = 0
else: # versions equal: minor version
self.version[0] = self.dev_data_version
self.version[1] += 1
self.version[2] = 0
elif self.release_type == "major":
self.version[0] += 1
self.version[1] = self.version[2] = 0
elif self.release_type == "minor":
self.version[1] += 1
self.version[2] = 0
elif self.release_type == "hotfix":
self.version[2] += 1
else:
exit(5)
if self.version[0] > self.dev_data_version:
print(
"ABORTING: data_version {d} is < to tag {t} to be published".format(
d=self.dev_data_version, t=self.latest_tag
)
)
exit(1)
self.str_version = "{maj}.{min}.{hf}".format(
maj=self.version[0], min=self.version[1], hf=self.version[2]
)
print("New version is {}".format(self.str_version))
return self.str_version
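    # Illustrative outcomes of the bump logic above (hypothetical numbers),
    # assuming the latest tag is v5.3.1:
    #   release_type == "regular", dev data_version == 6  ->  6.0.0
    #   release_type == "regular", dev data_version == 5  ->  5.4.0
    #   release_type == "minor"                           ->  5.4.0
    #   release_type == "hotfix"                          ->  5.3.2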
def checkout_parent_branch(self):
parent = ""
if self.release_type == "hotfix":
parent = "release"
else:
parent = "dev"
self.git.checkout(parent)
self.git.submodule('update', '--recursive')
print("current branch {}".format(self.repo.active_branch))
def closed_pr_generator(self):
# lazy get all closed PR ordered by last updated
closed_pr = []
page = 1
while True:
query = (
"https://api.github.com/repos/CanalTP/navitia/"
"pulls?state=closed&base=dev&sort=updated&direction=desc&page={page}".format(page=page)
)
print("query github api: " + query)
github_response = requests.get(query, auth=self.auth)
if github_response.status_code != 200:
message = github_response.json()['message']
                print(u' * Unable to retrieve PRs\n * ' + message)
return
closed_pr = github_response.json()
if not closed_pr:
print("Reached end of PR list")
return
for pr in closed_pr:
yield pr
page += 1
def get_merged_pullrequest(self):
lines = []
nb_successive_merged_pr = 0
for pr in self.closed_pr_generator():
title = pr['title']
url = pr['html_url']
pr_head_sha = pr['head']['sha']
# test if PR was merged (not simply closed)
# and if distant/release contains HEAD of PR
# (stops after 10 successive merged PR)
if pr['merged_at']:
branches = []
try:
branches = self.git.branch('-r', '--contains', pr_head_sha, '--no-color') + '\n'
except:
print(
"ERROR while searching for commit in release branch: "
"Following PR added to changelog, remove it if needed.\n"
)
# adding separators before and after to match only branch name
release_branch_name = ' ' + self.remote_name + '/release\n'
if release_branch_name in branches:
nb_successive_merged_pr += 1
if nb_successive_merged_pr >= 10:
break
else:
# doing the label search as late as possible to save api calls
has_excluded_label = False
label_query = pr['_links']['issue']['href'] + '/labels'
labels = requests.get(label_query, auth=self.auth).json()
if any(label['name'] in ("hotfix", "not_in_changelog") for label in labels):
has_excluded_label = True
if not has_excluded_label:
lines.append(u' * {title} <{url}>\n'.format(title=title, url=url))
print(lines[-1])
nb_successive_merged_pr = 0
return lines
def create_changelog(self):
write_lines = [u'navitia2 (%s) unstable; urgency=low\n' % self.str_version, u'\n']
if self.release_type != "hotfix":
pullrequests = self.get_merged_pullrequest()
write_lines.extend(pullrequests)
else:
write_lines.append(u' * \n')
author_name = self.git.config('user.name')
author_mail = self.git.config('user.email')
write_lines.extend(
[
u'\n',
u' -- {name} <{mail}> {now} +0100\n'.format(
                    name=author_name, mail=author_mail, now=datetime.now().strftime("%a, %d %b %Y %H:%M:%S")
),
u'\n',
]
)
return write_lines
def update_changelog(self):
print("updating changelog")
changelog = self.create_changelog()
f_changelog = None
back_filename = self.changelog_filename + "~"
try:
f_changelog = codecs.open(self.changelog_filename, 'r', 'utf-8')
except IOError:
print("Unable to open file: " + self.changelog_filename)
exit(1)
f_changelogback = codecs.open(back_filename, "w", "utf-8")
for line in changelog:
f_changelogback.write(line)
for line in f_changelog:
f_changelogback.write(line)
f_changelog.close()
f_changelogback.close()
last_modified = stat(back_filename)
(stdout, stderr) = subprocess.Popen(
["vim", back_filename, "--nofork"], stderr=subprocess.PIPE
).communicate()
after = stat(back_filename)
if last_modified == after:
print("No changes made, we stop")
remove(back_filename)
exit(2)
copyfile(back_filename, self.changelog_filename)
self.git.add(os.path.abspath(self.changelog_filename))
def get_modified_changelog(self):
# the changelog might have been modified by the user, so we have to read it again
f_changelog = codecs.open(self.changelog_filename, 'r', 'utf-8')
lines = []
nb_version = 0
for line in f_changelog:
# each version are separated by a line like
# navitia2 (0.94.1) unstable; urgency=low
if line.startswith("navitia2 "):
nb_version += 1
continue
if nb_version >= 2:
break # we can stop
if nb_version == 0:
continue
lines.append(line + u'\n')
f_changelog.close()
return lines
def publish_release(self, temp_branch):
self.git.checkout("release")
self.git.submodule('update', '--recursive')
# merge with the release branch
self.git.merge(temp_branch, "release", '--no-ff')
print("current branch {}".format(self.repo.active_branch))
# we tag the release
tag_message = u'Version {}\n'.format(self.str_version)
changelog = self.get_modified_changelog()
for change in changelog:
tag_message += change
print("tag: " + tag_message)
self.repo.create_tag(get_tag_name(self.version), message=tag_message)
# and we merge back the release branch to dev (at least for the tag in release)
self.git.merge("release", "dev", '--no-ff')
print("publishing the release")
print("Check the release, you will probably want to merge release in dev:")
print(" git checkout dev; git submodule update --recursive")
print(" git merge release")
print("And when you're happy do:")
print(" git push {} release dev --tags".format(self.remote_name))
        # TODO: once we are confident, we will do this automatically
def release_the_kraken(self, new_version):
tmp_name = "release_%s" % new_version
self.checkout_parent_branch()
# we then create a new temporary branch
print("creating temporary release branch {}".format(tmp_name))
self.git.checkout(b=tmp_name)
print("current branch {}".format(self.repo.active_branch))
self.update_changelog()
self.git.commit(m="Version %s" % self.str_version)
if self.release_type == "hotfix":
print("now time to do your actual hotfix! (cherry-pick commits)")
print("PLEASE check that \"release\" COMPILES and TESTS!")
print("Note: you'll have to merge/tag/push manually after your fix:")
print(" git checkout release")
print(" git merge --no-ff {tmp_branch}".format(tmp_branch=tmp_name))
print(
" git tag -a {} #then add message on Version and mention concerned PRs".format(
get_tag_name(self.version)
)
)
print(" git checkout dev")
print(" git merge --ff release")
print(" git push {} release dev --tags".format(self.remote_name))
        # TODO: also script this (e.g. two hotfix steps, like 'hotfix init' and 'hotfix publish'?)
exit(0)
self.publish_release(tmp_name)
def get_release_type():
    if raw_input("Do you need a binarization? [y/N] ").lower() == "y":
        return "major"
    if raw_input("Have you changed the API or data interface? [y/N] ").lower() == "y":
        return "major"
    if raw_input("Are the changes backward compatible? [y/N] ").lower() == "y":
        return "minor"
    if raw_input("Are you hotfixing? [y/N] ").lower() == "y":
        return "hotfix"
    raise RuntimeError("Couldn't determine the release type")
if __name__ == '__main__':
    # the release type is prompted interactively; the only optional
    # positional argument is the remote name
    if len(argv) > 2:
        print("usage: python script_release.py [remote]")
        print("optional argument: remote (default is CanalTP)")
        exit(5)
logging.basicConfig(level=logging.INFO)
release_type = get_release_type()
remote = argv[1] if len(argv) >= 2 else "CanalTP"
manager = ReleaseManager(release_type, remote_name=remote)
new_version = manager.get_new_version_number()
print("Release type: {}".format(release_type))
print("Release version: {}".format(new_version))
if raw_input("Shall we proceed ? [Y/n] ").lower() != "y":
exit(6)
manager.release_the_kraken(new_version)
| agpl-3.0 |
levilucio/SyVOLT | GM2AUTOSAR_MM/graph_MT_post__directLink_T.py | 2 | 3614 | """
__graph_MT_post__directLink_T.py___________________________________________________________
Automatically generated LINK for entity MT_post__directLink_T
DO NOT MODIFY DIRECTLY
___________________________________________________________________________________________
"""
from graphLink import *
from stickylink import *
from widthXfillXdecoration import *
class graph_MT_post__directLink_T(graphLink):
def __init__(self, xc, yc, semObject = None ):
self.semObject = semObject
self.semanticObject = semObject
from linkEditor import *
self.le=linkEditor(self,self.semObject, "directLink_T")
self.le.FirstLink= stickylink()
self.le.FirstLink.arrow=ATOM3Boolean()
self.le.FirstLink.arrow.setValue((' ', 0))
self.le.FirstLink.arrow.config = 0
self.le.FirstLink.arrowShape1=ATOM3Integer(8)
self.le.FirstLink.arrowShape2=ATOM3Integer(10)
self.le.FirstLink.arrowShape3=ATOM3Integer(3)
self.le.FirstLink.decoration=ATOM3Appearance()
self.le.FirstLink.decoration.setValue( ('directLink_T_1stLink', self.le.FirstLink))
self.le.FirstSegment= widthXfillXdecoration()
self.le.FirstSegment.width=ATOM3Integer(2)
self.le.FirstSegment.fill=ATOM3String('black', 20)
self.le.FirstSegment.stipple=ATOM3String('', 20)
self.le.FirstSegment.arrow=ATOM3Boolean()
self.le.FirstSegment.arrow.setValue((' ', 0))
self.le.FirstSegment.arrow.config = 0
self.le.FirstSegment.arrowShape1=ATOM3Integer(8)
self.le.FirstSegment.arrowShape2=ATOM3Integer(10)
self.le.FirstSegment.arrowShape3=ATOM3Integer(3)
self.le.FirstSegment.decoration=ATOM3Appearance()
self.le.FirstSegment.decoration.setValue( ('directLink_T_1stSegment', self.le.FirstSegment))
self.le.FirstSegment.decoration_Position=ATOM3Enum(['Up', 'Down', 'Middle', 'No decoration'],3,0)
self.le.Center=ATOM3Appearance()
self.le.Center.setValue( ('directLink_T_Center', self.le))
self.le.SecondSegment= widthXfillXdecoration()
self.le.SecondSegment.width=ATOM3Integer(2)
self.le.SecondSegment.fill=ATOM3String('black', 20)
self.le.SecondSegment.stipple=ATOM3String('', 20)
self.le.SecondSegment.arrow=ATOM3Boolean()
self.le.SecondSegment.arrow.setValue((' ', 0))
self.le.SecondSegment.arrow.config = 0
self.le.SecondSegment.arrowShape1=ATOM3Integer(8)
self.le.SecondSegment.arrowShape2=ATOM3Integer(10)
self.le.SecondSegment.arrowShape3=ATOM3Integer(3)
self.le.SecondSegment.decoration=ATOM3Appearance()
self.le.SecondSegment.decoration.setValue( ('directLink_T_2ndSegment', self.le.SecondSegment))
self.le.SecondSegment.decoration_Position=ATOM3Enum(['Up', 'Down', 'Middle', 'No decoration'],3,0)
self.le.SecondLink= stickylink()
self.le.SecondLink.arrow=ATOM3Boolean()
self.le.SecondLink.arrow.setValue((' ', 1))
self.le.SecondLink.arrow.config = 0
self.le.SecondLink.arrowShape1=ATOM3Integer(8)
self.le.SecondLink.arrowShape2=ATOM3Integer(10)
self.le.SecondLink.arrowShape3=ATOM3Integer(3)
self.le.SecondLink.decoration=ATOM3Appearance()
self.le.SecondLink.decoration.setValue( ('directLink_T_2ndLink', self.le.SecondLink))
self.le.FirstLink.decoration.semObject=self.semObject
self.le.FirstSegment.decoration.semObject=self.semObject
self.le.Center.semObject=self.semObject
self.le.SecondSegment.decoration.semObject=self.semObject
self.le.SecondLink.decoration.semObject=self.semObject
graphLink.__init__(self, xc, yc, self.le,semObject)
| mit |
pycrystem/pycrystem | pyxem/signals/tensor_field.py | 1 | 3246 | # -*- coding: utf-8 -*-
# Copyright 2016-2020 The pyXem developers
#
# This file is part of pyXem.
#
# pyXem is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyXem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyXem. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.signals import Signal2D
import numpy as np
from scipy.linalg import polar
from hyperspy.utils import stack
import math
from pyxem.signals.strain_map import StrainMap
"""
Signal class for Tensor Fields
"""
def _polar_decomposition(image, side):
"""Perform a polar decomposition of a second rank tensor.
Parameters
----------
image : np.array()
Matrix on which to form polar decomposition.
side : str
'left' or 'right' the side on which to perform polar decomposition.
Returns
-------
U, R : np.array()
Stretch and rotation matrices obtained by polar decomposition.
"""
return np.array(polar(image, side=side))
def _get_rotation_angle(matrix):
"""Find the rotation angle associated with a given rotation matrix.
Parameters
----------
matrix : np.array()
A rotation matrix.
Returns
-------
angle : np.array()
Rotation angle associated with matrix.
"""
return np.array(-math.asin(matrix[1, 0]))
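# Sanity check (illustrative): for a 2D rotation matrix
#   R = [[cos(t), -sin(t)],
#        [sin(t),  cos(t)]]
# matrix[1, 0] == sin(t), so _get_rotation_angle(R) returns -t, which is the
# sign convention assumed by get_strain_maps() below.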
class DisplacementGradientMap(Signal2D):
_signal_type = "tensor_field"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Check that the signal dimensions are (3,3) for it to be a valid
# TensorField
def polar_decomposition(self):
"""Perform polar decomposition on the second rank tensors describing
        the TensorField. The right polar decomposition is used, given by
:math:`D = RU`
Returns
-------
R : TensorField
The orthogonal matrix describing the rotation field.
U : TensorField
            The right stretch tensor field, from which the strain maps are derived.
"""
RU = self.map(_polar_decomposition, side="right", inplace=False)
return RU.isig[:, :, 0], RU.isig[:, :, 1]
def get_strain_maps(self):
"""Obtain strain maps from the displacement gradient tensor at each
navigation position in the small strain approximation.
Returns
-------
strain_results : BaseSignal
Signal of shape < 4 | , > , navigation order is e11,e22,e12,theta
"""
R, U = self.polar_decomposition()
e11 = -U.isig[0, 0].T + 1
e12 = U.isig[0, 1].T
e21 = U.isig[1, 0].T
e22 = -U.isig[1, 1].T + 1
theta = R.map(_get_rotation_angle, inplace=False)
theta.axes_manager.set_signal_dimension(2)
strain_results = stack([e11, e22, e12, theta])
return StrainMap(strain_results)
| gpl-3.0 |
srm912/servo | tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py | 125 | 5358 | import sys
import os
import hashlib
import urllib
import itertools
import re
import json
import glob
import shutil
try:
import genshi
from genshi.template import MarkupTemplate
from html5lib.tests import support
except ImportError:
print """This script requires the Genshi templating library and html5lib source
It is recommended that these are installed in a virtualenv:
virtualenv venv
source venv/bin/activate
pip install genshi
cd venv
git clone [email protected]:html5lib/html5lib-python.git html5lib
cd html5lib
git submodule init
git submodule update
pip install -e ./
Then run this script again, with the virtual environment still active.
When you are done, type "deactivate" to deactivate the virtual environment.
"""
TESTS_PATH = "html/syntax/parsing/"
def get_paths():
script_path = os.path.split(os.path.abspath(__file__))[0]
repo_base = get_repo_base(script_path)
tests_path = os.path.join(repo_base, TESTS_PATH)
return script_path, tests_path
def get_repo_base(path):
while path:
if os.path.exists(os.path.join(path, ".git")):
return path
else:
path = os.path.split(path)[0]
def get_expected(data):
data = "#document\n" + data
return data
def get_hash(data, container=None):
    if container is None:
container = ""
return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"),
data.encode("utf8"))).hexdigest()
def make_tests(script_dir, out_dir, input_file_name, test_data):
tests = []
innerHTML_tests = []
ids_seen = {}
print input_file_name
for test in test_data:
if "script-off" in test:
continue
is_innerHTML = "document-fragment" in test
data = test["data"]
container = test["document-fragment"] if is_innerHTML else None
assert test["document"], test
expected = get_expected(test["document"])
test_list = innerHTML_tests if is_innerHTML else tests
test_id = get_hash(data, container)
if test_id in ids_seen:
print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id])
continue
ids_seen[test_id] = (container, data)
test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")),
'input':data,
'expected':expected,
'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))),
'id':test_id,
'container':container
})
path_normal = None
if tests:
path_normal = write_test_file(script_dir, out_dir,
tests, "html5lib_%s"%input_file_name,
"html5lib_test.xml")
path_innerHTML = None
if innerHTML_tests:
path_innerHTML = write_test_file(script_dir, out_dir,
innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name,
"html5lib_test_fragment.xml")
return path_normal, path_innerHTML
def write_test_file(script_dir, out_dir, tests, file_name, template_file_name):
file_name = os.path.join(out_dir, file_name + ".html")
short_name = os.path.split(file_name)[1]
with open(os.path.join(script_dir, template_file_name)) as f:
template = MarkupTemplate(f)
stream = template.generate(file_name=short_name, tests=tests)
with open(file_name, "w") as f:
f.write(stream.render('html', doctype='html5',
encoding="utf8"))
return file_name
def escape_js_string(in_data):
return in_data.encode("utf8").encode("string-escape")
def serialize_filenames(test_filenames):
return "[" + ",\n".join("\"%s\""%item for item in test_filenames) + "]"
def main():
script_dir, out_dir = get_paths()
test_files = []
inner_html_files = []
if len(sys.argv) > 2:
test_iterator = itertools.izip(
itertools.repeat(False),
sorted(os.path.abspath(item) for item in
glob.glob(os.path.join(sys.argv[2], "*.dat"))))
else:
test_iterator = itertools.chain(
itertools.izip(itertools.repeat(False),
sorted(support.get_data_files("tree-construction"))),
itertools.izip(itertools.repeat(True),
sorted(support.get_data_files(
os.path.join("tree-construction", "scripted")))))
for (scripted, test_file) in test_iterator:
input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
if scripted:
input_file_name = "scripted_" + input_file_name
test_data = support.TestData(test_file)
test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
input_file_name, test_data)
if test_filename is not None:
test_files.append(test_filename)
if inner_html_file_name is not None:
inner_html_files.append(inner_html_file_name)
if __name__ == "__main__":
main()
| mpl-2.0 |
NSAmelchev/ignite | modules/platforms/python/pyignite/cache.py | 11 | 22098 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterable, Optional, Union
from .datatypes import prop_codes
from .exceptions import (
CacheCreationError, CacheError, ParameterError, SQLError,
)
from .utils import cache_id, is_wrapped, status_to_exception, unwrap_binary
from .api.cache_config import (
cache_create, cache_create_with_config,
cache_get_or_create, cache_get_or_create_with_config,
cache_destroy, cache_get_configuration,
)
from .api.key_value import (
cache_get, cache_put, cache_get_all, cache_put_all, cache_replace,
cache_clear, cache_clear_key, cache_clear_keys,
cache_contains_key, cache_contains_keys,
cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent,
cache_get_and_remove, cache_get_and_replace,
cache_remove_key, cache_remove_keys, cache_remove_all,
cache_remove_if_equals, cache_replace_if_equals, cache_get_size,
)
from .api.sql import scan, scan_cursor_get_page, sql, sql_cursor_get_page
PROP_CODES = set([
getattr(prop_codes, x)
for x in dir(prop_codes)
if x.startswith('PROP_')
])
CACHE_CREATE_FUNCS = {
True: {
True: cache_get_or_create_with_config,
False: cache_create_with_config,
},
False: {
True: cache_get_or_create,
False: cache_create,
},
}
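# Dispatch above is keyed on (settings is a dict?, get-or-create?), e.g.:
#   CACHE_CREATE_FUNCS[True][False] is cache_create_with_config
#   CACHE_CREATE_FUNCS[False][True] is cache_get_or_create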
class Cache:
"""
Ignite cache abstraction. Users should never use this class directly,
but construct its instances with
:py:meth:`~pyignite.client.Client.create_cache`,
:py:meth:`~pyignite.client.Client.get_or_create_cache` or
:py:meth:`~pyignite.client.Client.get_cache` methods instead. See
:ref:`this example <create_cache>` on how to do it.
"""
_cache_id = None
_name = None
_client = None
_settings = None
@staticmethod
def _validate_settings(
settings: Union[str, dict]=None, get_only: bool=False,
):
if any([
not settings,
type(settings) not in (str, dict),
type(settings) is dict and prop_codes.PROP_NAME not in settings,
]):
raise ParameterError('You should supply at least cache name')
if all([
type(settings) is dict,
not set(settings).issubset(PROP_CODES),
]):
raise ParameterError('One or more settings was not recognized')
if get_only and type(settings) is dict and len(settings) != 1:
raise ParameterError('Only cache name allowed as a parameter')
def __init__(
self, client: 'Client', settings: Union[str, dict]=None,
with_get: bool=False, get_only: bool=False,
):
"""
Initialize cache object.
:param client: Ignite client,
:param settings: cache settings. Can be a string (cache name) or a dict
of cache properties and their values. In this case PROP_NAME is
mandatory,
        :param with_get: (optional) do not raise an exception if the cache
         already exists. Defaults to False,
:param get_only: (optional) do not communicate with Ignite server
at all, only create Cache instance. Defaults to False.
"""
self._client = client
self._validate_settings(settings)
if type(settings) == str:
self._name = settings
else:
self._name = settings[prop_codes.PROP_NAME]
if not get_only:
func = CACHE_CREATE_FUNCS[type(settings) is dict][with_get]
result = func(client, settings)
if result.status != 0:
raise CacheCreationError(result.message)
self._cache_id = cache_id(self._name)
@property
def settings(self) -> Optional[dict]:
"""
Lazy Cache settings. See the :ref:`example <sql_cache_read>`
of reading this property.
All cache properties are documented here: :ref:`cache_props`.
:return: dict of cache properties and their values.
"""
if self._settings is None:
config_result = cache_get_configuration(self._client, self._cache_id)
if config_result.status == 0:
self._settings = config_result.value
else:
raise CacheError(config_result.message)
return self._settings
@property
def name(self) -> str:
"""
Lazy cache name.
:return: cache name string.
"""
if self._name is None:
self._name = self.settings[prop_codes.PROP_NAME]
return self._name
@property
def client(self) -> 'Client':
"""
Ignite :class:`~pyignite.client.Client` object.
:return: Client object, through which the cache is accessed.
"""
return self._client
@property
def cache_id(self) -> int:
"""
Cache ID.
:return: integer value of the cache ID.
"""
return self._cache_id
def _process_binary(self, value: Any) -> Any:
"""
Detects and recursively unwraps Binary Object.
:param value: anything that could be a Binary Object,
:return: the result of the Binary Object unwrapping with all other data
left intact.
"""
if is_wrapped(value):
return unwrap_binary(self._client, value)
return value
@status_to_exception(CacheError)
def destroy(self):
"""
Destroys cache with a given name.
"""
return cache_destroy(self._client, self._cache_id)
@status_to_exception(CacheError)
def get(self, key, key_hint: object=None) -> Any:
"""
Retrieves a value from cache by key.
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:return: value retrieved.
"""
result = cache_get(self._client, self._cache_id, key, key_hint=key_hint)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def put(self, key, value, key_hint: object=None, value_hint: object=None):
"""
Puts a value with a given key to cache (overwriting existing value
if any).
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted.
"""
return cache_put(
self._client, self._cache_id, key, value,
key_hint=key_hint, value_hint=value_hint
)
@status_to_exception(CacheError)
def get_all(self, keys: list) -> list:
"""
Retrieves multiple key-value pairs from cache.
:param keys: list of keys or tuples of (key, key_hint),
:return: a dict of key-value pairs.
"""
result = cache_get_all(self._client, self._cache_id, keys)
if result.value:
for key, value in result.value.items():
result.value[key] = self._process_binary(value)
return result
@status_to_exception(CacheError)
def put_all(self, pairs: dict):
"""
Puts multiple key-value pairs to cache (overwriting existing
associations if any).
:param pairs: dictionary type parameters, contains key-value pairs
to save. Each key or value can be an item of representable
Python type or a tuple of (item, hint),
"""
return cache_put_all(self._client, self._cache_id, pairs)
@status_to_exception(CacheError)
def replace(
self, key, value, key_hint: object=None, value_hint: object=None
):
"""
Puts a value with a given key to cache only if the key already exist.
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted.
"""
result = cache_replace(
self._client, self._cache_id, key, value,
key_hint=key_hint, value_hint=value_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def clear(self, keys: Optional[list]=None):
"""
Clears the cache without notifying listeners or cache writers.
:param keys: (optional) list of cache keys or (key, key type
hint) tuples to clear (default: clear all).
"""
if keys:
return cache_clear_keys(self._client, self._cache_id, keys)
else:
return cache_clear(self._client, self._cache_id)
@status_to_exception(CacheError)
def clear_key(self, key, key_hint: object=None):
"""
Clears the cache key without notifying listeners or cache writers.
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
"""
return cache_clear_key(
self._client, self._cache_id, key, key_hint=key_hint
)
@status_to_exception(CacheError)
def contains_key(self, key, key_hint=None) -> bool:
"""
        Returns a value indicating whether the given key is present in cache.
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:return: boolean `True` when key is present, `False` otherwise.
"""
return cache_contains_key(
self._client, self._cache_id, key, key_hint=key_hint
)
@status_to_exception(CacheError)
def contains_keys(self, keys: Iterable) -> bool:
"""
Returns a value indicating whether all given keys are present in cache.
:param keys: a list of keys or (key, type hint) tuples,
:return: boolean `True` when all keys are present, `False` otherwise.
"""
return cache_contains_keys(self._client, self._cache_id, keys)
@status_to_exception(CacheError)
def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any:
"""
Puts a value with a given key to cache, and returns the previous value
        for that key, or None if there was no such key.
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted.
:return: old value or None.
"""
result = cache_get_and_put(
self._client, self._cache_id, key, value, key_hint, value_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def get_and_put_if_absent(
self, key, value, key_hint=None, value_hint=None
):
"""
Puts a value with a given key to cache only if the key does not
already exist.
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted,
:return: old value or None.
"""
result = cache_get_and_put_if_absent(
self._client, self._cache_id, key, value, key_hint, value_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def put_if_absent(self, key, value, key_hint=None, value_hint=None):
"""
Puts a value with a given key to cache only if the key does not
already exist.
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted.
"""
return cache_put_if_absent(
self._client, self._cache_id, key, value, key_hint, value_hint
)
@status_to_exception(CacheError)
def get_and_remove(self, key, key_hint=None) -> Any:
"""
Removes the cache entry with specified key, returning the value.
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:return: old value or None.
"""
result = cache_get_and_remove(
self._client, self._cache_id, key, key_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def get_and_replace(
self, key, value, key_hint=None, value_hint=None
) -> Any:
"""
Puts a value with a given key to cache, returning previous value
for that key, if and only if there is a value currently mapped
for that key.
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted.
:return: old value or None.
"""
result = cache_get_and_replace(
self._client, self._cache_id, key, value, key_hint, value_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def remove_key(self, key, key_hint=None):
"""
Clears the cache key without notifying listeners or cache writers.
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
"""
return cache_remove_key(self._client, self._cache_id, key, key_hint)
@status_to_exception(CacheError)
def remove_keys(self, keys: list):
"""
Removes cache entries by given list of keys, notifying listeners
and cache writers.
:param keys: list of keys or tuples of (key, key_hint) to remove.
"""
return cache_remove_keys(self._client, self._cache_id, keys)
@status_to_exception(CacheError)
def remove_all(self):
"""
Removes all cache entries, notifying listeners and cache writers.
"""
return cache_remove_all(self._client, self._cache_id)
@status_to_exception(CacheError)
def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None):
"""
Removes an entry with a given key if provided value is equal to
actual value, notifying listeners and cache writers.
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
        :param sample_hint: (optional) Ignite data type, for which
         the given sample should be converted.
"""
return cache_remove_if_equals(
self._client, self._cache_id, key, sample, key_hint, sample_hint
)
@status_to_exception(CacheError)
def replace_if_equals(
self, key, sample, value,
key_hint=None, sample_hint=None, value_hint=None
) -> Any:
"""
Puts a value with a given key to cache only if the key already exists
and value equals provided sample.
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param value: new value for the given key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
        :param sample_hint: (optional) Ignite data type, for which
         the given sample should be converted,
:param value_hint: (optional) Ignite data type, for which the given
value should be converted,
        :return: boolean `True` if the value was replaced, `False` otherwise.
"""
result = cache_replace_if_equals(
self._client, self._cache_id, key, sample, value,
key_hint, sample_hint, value_hint
)
result.value = self._process_binary(result.value)
return result
@status_to_exception(CacheError)
def get_size(self, peek_modes=0):
"""
Gets the number of entries in cache.
:param peek_modes: (optional) limit count to near cache partition
(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
:return: integer number of cache entries.
"""
return cache_get_size(self._client, self._cache_id, peek_modes)
def scan(self, page_size: int=1, partitions: int=-1, local: bool=False):
"""
Returns all key-value pairs from the cache, similar to `get_all`, but
with internal pagination, which is slower, but safer.
:param page_size: (optional) page size. Default size is 1 (slowest
and safest),
:param partitions: (optional) number of partitions to query
(negative to query entire cache),
:param local: (optional) pass True if this query should be executed
on local node only. Defaults to False,
:return: generator with key-value pairs.
"""
result = scan(self._client, self._cache_id, page_size, partitions, local)
if result.status != 0:
raise CacheError(result.message)
cursor = result.value['cursor']
for k, v in result.value['data'].items():
k = self._process_binary(k)
v = self._process_binary(v)
yield k, v
while result.value['more']:
result = scan_cursor_get_page(self._client, cursor)
if result.status != 0:
raise CacheError(result.message)
for k, v in result.value['data'].items():
k = self._process_binary(k)
v = self._process_binary(v)
yield k, v
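    # Minimal usage sketch (illustrative; the host, port and cache name are
    # assumptions, not part of this module):
    #   client = Client()
    #   client.connect('127.0.0.1', 10800)
    #   cache = client.get_or_create_cache('my_cache')
    #   cache.put(1, 'one')
    #   for k, v in cache.scan(page_size=100):
    #       print(k, v)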
def select_row(
self, query_str: str, page_size: int=1,
query_args: Optional[list]=None, distributed_joins: bool=False,
replicated_only: bool=False, local: bool=False, timeout: int=0
):
"""
Executes a simplified SQL SELECT query over data stored in the cache.
The query returns the whole record (key and value).
:param query_str: SQL query string,
:param page_size: (optional) cursor page size. Default is 1, which
means that client makes one server call per row,
:param query_args: (optional) query arguments,
:param distributed_joins: (optional) distributed joins. Defaults
to False,
:param replicated_only: (optional) whether query contains only
replicated tables or not. Defaults to False,
:param local: (optional) pass True if this query should be executed
on local node only. Defaults to False,
:param timeout: (optional) non-negative timeout value in ms. Zero
disables timeout (default),
:return: generator with key-value pairs.
"""
def generate_result(value):
cursor = value['cursor']
more = value['more']
for k, v in value['data'].items():
k = self._process_binary(k)
v = self._process_binary(v)
yield k, v
while more:
inner_result = sql_cursor_get_page(self._client, cursor)
                if inner_result.status != 0:
                    raise SQLError(inner_result.message)
more = inner_result.value['more']
for k, v in inner_result.value['data'].items():
k = self._process_binary(k)
v = self._process_binary(v)
yield k, v
type_name = self.settings[
prop_codes.PROP_QUERY_ENTITIES
][0]['value_type_name']
if not type_name:
raise SQLError('Value type is unknown')
result = sql(
self._client,
self._cache_id,
type_name,
query_str,
page_size,
query_args,
distributed_joins,
replicated_only,
local,
timeout
)
if result.status != 0:
raise SQLError(result.message)
return generate_result(result.value)
| apache-2.0 |
mabushadi/dpxdt | dpxdt/tools/diff_my_urls.py | 7 | 6027 | #!/usr/bin/env python
# Copyright 2014 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for diffing a set of URL pairs defined in a config file.
Example usage:
./dpxdt/tools/diff_my_urls.py \
--upload_build_id=1234 \
--release_server_prefix=https://my-dpxdt-apiserver.example.com/api \
--release_client_id=<your api key> \
--release_client_secret=<your api secret> \
--upload_release_name="My release name" \
--release_cut_url=http://example.com/path/to/my/release/tool/for/this/cut
--tests_json_path=my_url_tests.json
Example input file "my_url_tests.json". One entry per test:
[
{
"name": "My homepage",
"run_url": "http://localhost:5000/static/dummy/dummy_page1.html",
"run_config": {
"viewportSize": {
"width": 1024,
"height": 768
},
"injectCss": "#foobar { background-color: lime",
"injectJs": "document.getElementById('foobar').innerText = 'bar';",
},
"ref_url": "http://localhost:5000/static/dummy/dummy_page1.html",
"ref_config": {
"viewportSize": {
"width": 1024,
"height": 768
},
"injectCss": "#foobar { background-color: goldenrod; }",
"injectJs": "document.getElementById('foobar').innerText = 'foo';",
}
},
...
]
See README.md for documentation of config parameters.
"""
import datetime
import json
import logging
import sys
# Local Libraries
import gflags
FLAGS = gflags.FLAGS
# Local modules
from dpxdt.client import fetch_worker
from dpxdt.client import release_worker
from dpxdt.client import workers
import flags
class Test(object):
"""Represents the JSON of a single test."""
def __init__(self, name=None, run_url=None, run_config=None,
ref_url=None, ref_config=None):
self.name = name
self.run_url = run_url
self.run_config_data = json.dumps(run_config) if run_config else None
self.ref_url = ref_url
self.ref_config_data = json.dumps(ref_config) if ref_config else None
def load_tests(data):
"""Loads JSON data and returns a list of Test objects it contains."""
test_list = json.loads(data)
results = []
for test_json in test_list:
results.append(Test(**test_json))
return results
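# Illustrative call (hypothetical JSON):
#   load_tests('[{"name": "t1", "run_url": "http://example.com"}]')
# returns a single Test whose ref_url and *_config_data fields are None.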
class DiffMyUrls(workers.WorkflowItem):
"""Workflow for diffing a set of URL pairs defined in a config file.
Args:
release_url: URL of the newest and best version of the page.
tests: List of Test objects to test.
upload_build_id: Optional. Build ID of the site being compared. When
supplied a new release will be cut for this build comparing it
to the last good release.
upload_release_name: Optional. Release name to use for the build. When
not supplied, a new release based on the current time will be
created.
heartbeat: Function to call with progress status.
"""
def run(self,
release_url,
tests,
upload_build_id,
upload_release_name,
heartbeat=None):
if not upload_release_name:
upload_release_name = str(datetime.datetime.utcnow())
yield heartbeat('Creating release %s' % upload_release_name)
release_number = yield release_worker.CreateReleaseWorkflow(
upload_build_id, upload_release_name, release_url)
pending_uploads = []
for test in tests:
item = release_worker.RequestRunWorkflow(
upload_build_id, upload_release_name, release_number,
test.name, url=test.run_url, config_data=test.run_config_data,
ref_url=test.ref_url, ref_config_data=test.ref_config_data)
pending_uploads.append(item)
yield heartbeat('Requesting %d runs' % len(pending_uploads))
yield pending_uploads
yield heartbeat('Marking runs as complete')
release_url = yield release_worker.RunsDoneWorkflow(
upload_build_id, upload_release_name, release_number)
yield heartbeat('Results viewable at: %s' % release_url)
def real_main(release_url=None,
tests_json_path=None,
upload_build_id=None,
upload_release_name=None):
"""Runs diff_my_urls."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
data = open(FLAGS.tests_json_path).read()
tests = load_tests(data)
item = DiffMyUrls(
release_url,
tests,
upload_build_id,
upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join()
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
assert FLAGS.release_cut_url
assert FLAGS.release_server_prefix
assert FLAGS.tests_json_path
assert FLAGS.upload_build_id
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
real_main(
release_url=FLAGS.release_cut_url,
tests_json_path=FLAGS.tests_json_path,
upload_build_id=FLAGS.upload_build_id,
upload_release_name=FLAGS.upload_release_name)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
Osmose/kitsune | kitsune/sumo/tests/test_utils.py | 11 | 7012 | # -*- coding: utf8 -*-
import json
from django.contrib.auth.models import Permission
from django.test.client import RequestFactory
from mock import patch, Mock
from nose.tools import eq_
from kitsune.journal.models import Record
from kitsune.sumo.utils import (
chunked, get_next_url, is_ratelimited, smart_int, truncated_json_dumps, get_browser)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import profile
class SmartIntTestCase(TestCase):
def test_sanity(self):
eq_(10, smart_int('10'))
eq_(10, smart_int('10.5'))
def test_int(self):
eq_(10, smart_int(10))
def test_invalid_string(self):
eq_(0, smart_int('invalid'))
def test_empty_string(self):
eq_(0, smart_int(''))
def test_wrong_type(self):
eq_(0, smart_int(None))
eq_(10, smart_int([], 10))
def test_large_values(self):
"""Makes sure ints that would cause an overflow result in fallback."""
eq_(0, smart_int('1' * 1000))
class GetNextUrlTests(TestCase):
def setUp(self):
super(GetNextUrlTests, self).setUp()
self.r = RequestFactory()
self.patcher = patch('django.contrib.sites.models.Site.objects')
mock = self.patcher.start()
mock.get_current.return_value.domain = 'su.mo.com'
def tearDown(self):
self.patcher.stop()
super(GetNextUrlTests, self).tearDown()
def test_query_string(self):
"""Query-strings remain intact."""
r = self.r.get('/', {'next': '/new?f=b'})
eq_('/new?f=b', get_next_url(r))
def test_good_host_https(self):
"""Full URLs work with valid hosts."""
r = self.r.post('/users/login',
{'next': 'https://su.mo.com/kb/new'})
eq_('https://su.mo.com/kb/new', get_next_url(r))
def test_post(self):
"""'next' in POST overrides GET."""
r = self.r.post('/?next=/foo', {'next': '/bar'})
eq_('/bar', get_next_url(r))
def test_get(self):
"""'next' can be a query-string parameter."""
r = self.r.get('/users/login', {'next': '/kb/new'})
eq_('/kb/new', get_next_url(r))
def test_referer(self):
"""Use HTTP referer if nothing else."""
r = self.r.get('/')
r.META['HTTP_REFERER'] = 'http://su.mo.com/new'
eq_('http://su.mo.com/new', get_next_url(r))
def test_bad_host_https(self):
r = self.r.get('/', {'next': 'https://example.com'})
eq_(None, get_next_url(r))
def test_bad_host_protocol_relative(self):
"""Protocol-relative URLs do not let bad hosts through."""
r = self.r.get('/', {'next': '//example.com'})
eq_(None, get_next_url(r))
class JSONTests(TestCase):
def test_truncated_noop(self):
"""Make sure short enough things are unmodified."""
d = {'foo': 'bar'}
trunc = truncated_json_dumps(d, 1000, 'foo')
eq_(json.dumps(d), trunc)
def test_truncated_key(self):
"""Make sure truncation works as expected."""
d = {'foo': 'a long string that should be truncated'}
trunc = truncated_json_dumps(d, 30, 'foo')
obj = json.loads(trunc)
eq_(obj['foo'], 'a long string that ')
eq_(len(trunc), 30)
def test_unicode(self):
"""Unicode should not be treated as longer than it is."""
d = {'formula': u'A=πr²'}
trunc = truncated_json_dumps(d, 25, 'formula')
eq_(json.dumps(d, ensure_ascii=False), trunc)
class ChunkedTests(TestCase):
def test_chunked(self):
# chunking nothing yields nothing.
eq_(list(chunked([], 1)), [])
# chunking list where len(list) < n
eq_(list(chunked([1], 10)), [[1]])
# chunking a list where len(list) == n
eq_(list(chunked([1, 2], 2)), [[1, 2]])
# chunking list where len(list) > n
eq_(list(chunked([1, 2, 3, 4, 5], 2)),
[[1, 2], [3, 4], [5]])
# passing in a length overrides the real len(list)
eq_(list(chunked([1, 2, 3, 4, 5, 6, 7], 2, length=4)),
[[1, 2], [3, 4]])
class IsRatelimitedTest(TestCase):
def test_ratelimited(self):
u = profile().user
request = Mock()
request.user = u
request.limited = False
request.method = 'POST'
# One call to the rate limit won't trigger it.
eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)
# But two will
eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), True)
def test_ratelimit_bypass(self):
u = profile().user
bypass = Permission.objects.get(codename='bypass_ratelimit')
u.user_permissions.add(bypass)
request = Mock()
request.user = u
request.limited = False
request.method = 'POST'
# One call to the rate limit won't trigger it.
eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)
# And a second one still won't, because the user has the bypass permission.
eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)
def test_ratelimit_logging(self):
u = profile().user
request = Mock()
request.user = u
request.limited = False
request.method = 'POST'
eq_(Record.objects.count(), 0)
# Two calls will trigger the ratelimit once.
is_ratelimited(request, 'test-ratelimited', '1/min')
is_ratelimited(request, 'test-ratelimited', '1/min')
eq_(Record.objects.count(), 1)
class GetBrowserNameTest(TestCase):
def test_firefox(self):
"""Test with User Agent of Firefox"""
user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
# Check Firefox is returning
eq_(get_browser(user_agent), 'Firefox')
def test_chrome(self):
"""Test with User Agent of Chrome"""
user_agent = ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/41.0.2228.0 Safari/537.36')
# Check Chrome is returning
eq_(get_browser(user_agent), 'Chrome')
def test_internet_explorer(self):
"""Test with User Agent of Internet Explorer"""
# Check with default User Agent of IE 11
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko'
eq_(get_browser(user_agent), 'Trident')
# Check with Compatibility View situation user Agent of IE11
user_agent = ('Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; '
'Trident/7.0; rv:11.0) like Gecko')
eq_(get_browser(user_agent), 'MSIE')
def test_safari(self):
"""Test with User Agent of Safari"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14'
'(KHTML, like Gecko) Version/7.0.3 Safari/7046A194A')
# Check Safari is returning
eq_(get_browser(user_agent), 'Safari')
| bsd-3-clause |
rpm-software-management/librepo | examples/python/download_packages_with_fastestmirror.py | 1 | 1645 | #!/usr/bin/env python3
"""
librepo - download packages
"""
import os
import os.path
import time
import librepo
CACHE = "fastestmirror.cache"
LIBREPOPKG = "librepo-1.2.1-2.fc20.x86_64.rpm"
LAMEPKG = "lame-3.99.5-2.fc19.x86_64.rpm"
if __name__ == "__main__":
# Setup logging
def debug_function(msg, _):
print(msg)
#librepo.set_debug_log_handler(debug_function)
# Remove packages if already exists
def remove_pkg(filename):
if os.path.exists(filename):
os.remove(filename)
remove_pkg(LIBREPOPKG)
remove_pkg(LAMEPKG)
# Prepare list of targets
packages = []
# Prepare first target
h1 = librepo.Handle()
h1.metalinkurl = "https://mirrors.fedoraproject.org/metalink?repo=fedora-20&arch=x86_64"
h1.repotype = librepo.YUMREPO
h1.fastestmirror = True
h1.fastestmirrorcache = CACHE
target = librepo.PackageTarget("Packages/l/"+LIBREPOPKG, handle=h1)
packages.append(target)
# Prepare second target
h2 = librepo.Handle()
h2.mirrorlisturl = "http://mirrors.rpmfusion.org/mirrorlist?repo=free-fedora-19&arch=x86_64"
h2.repotype = librepo.YUMREPO
h2.fastestmirror = True
h2.fastestmirrorcache = CACHE
target = librepo.PackageTarget(LAMEPKG, handle=h2)
packages.append(target)
t = time.time()
librepo.download_packages(packages)
print("Download duration: {0}s\n".format((time.time() - t)))
for target in packages:
print("### %s: %s" % (target.local_path, target.err or "OK"))
print("Local path: ", target.local_path)
print("Error: ", target.err)
print()
| lgpl-2.1 |
mizuy/mizwiki | mizwiki/utils/conv_pukiwiki.py | 1 | 4421 | # -*- coding: utf-8 -*-
import codecs, os, cStringIO as StringIO, re, sys
class IStreamBuffer:
@staticmethod
def _conv(v):
return v.rstrip(u'\n\r')
def __init__(self,inputfile):
self.input = codecs.getreader('utf-8')(inputfile)
self.stack = []
def __iter__(self):
return self
def next(self):
if len(self.stack)>0:
return self._conv(self.stack.pop())
return self._conv(self.input.next())
def push(self,line):
self.stack.append(self._conv(line))
def eof(self):
if len(self.stack)==0:
try:
self.push(self.input.next())
except StopIteration:
return True
return False
def top(self):
assert not self.eof()
if len(self.stack)==0:
self.push(self.input.next())
return self.stack[-1]
def conv(inputs,os):
os = codecs.getwriter('utf-8')(os)
istr = IStreamBuffer(inputs)
for l in istr:
l = l.rstrip('~')
assert type(l)==unicode
if l.startswith('{{{'):
os.write(l+'\n')
for ll in istr:
os.write(ll+'\n')
if ll.startswith('}}}'):
break
continue
if l.startswith(' '):
istr.push(l)
parse_quote(istr,os)
continue
if l.strip().startswith('----') and l.replace('-',' ').strip()=='':
os.write('====\n')
continue
parse_inline(os,l)
os.write('\n')
def parse_quote(istr,os):
os.write('{{{\n')
for l in istr:
if l.startswith(' '):
os.write(l[1:]+'\n')
else:
break
os.write('}}}\n')
wikilabel = re.compile(ur'\[\[([^\]]+)>([\w_/\.\-]+)\]\]',re.U)
namelabel = re.compile(ur'\[\[([^\]]+)>#([_a-zA-Z0-9]+)\]\]',re.U)
areaedit = re.compile(ur'&areaedit\([^\)]*\){([^}]*)};', re.U)
new = re.compile(ur'&new{([^}]*)};', re.U)
pre = re.compile(ur"\[|&",re.U)
def parse_inline(doc, src):
assert type(src)==unicode
pos = 0
while pos<len(src):
m = pre.search(src,pos)
if not m:
doc.write(src[pos:])
return
doc.write(src[pos:m.start()])
pos = m.start()
if src[pos]=='[':
m = wikilabel.match(src,pos)
if m:
pos += len(m.group(0))
name = m.group(1)
url = m.group(2)
doc.write('[[%s:%s]]'%(name,url))
continue
m = namelabel.match(src,pos)
if m:
pos += len(m.group(0))
name = m.group(1)
url = m.group(2)
doc.write('[[%s:#%s]]'%(name,url))
continue
if src[pos]=='&':
m = areaedit.match(src,pos)
if m:
pos += len(m.group(0))
doc.write(m.group(1))
continue
m = new.match(src,pos)
if m:
pos += len(m.group(0))
doc.write(m.group(1))
continue
doc.write(src[pos])
pos += 1
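# Hypothetical sanity check for parse_inline (Python 2, as in this file):
#
#   buf = StringIO.StringIO()
#   doc = codecs.getwriter('utf-8')(buf)
#   parse_inline(doc, u'[[home>FrontPage]] and &new{2009-01-01};')
#   assert buf.getvalue() == '[[home:FrontPage]] and 2009-01-01'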
class iterdir(object):
def __init__(self, path, deep=False):
self._root = path
self._files = None
self.deep = deep
def __iter__(self):
return self
def next(self):
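        # peek/pop walk: each call pops the entry returned by the previous
        # call, expands it when it is a directory, then peeks the new top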
if self._files:
join = os.path.join
d = self._files.pop()
r = join(self._root, d)
if self.deep and os.path.isdir(r):
self._files += [join(d,n) for n in os.listdir(r)]
elif self._files is None:
self._files = os.listdir(self._root)
if self._files:
return self._files[-1]
else:
raise StopIteration
if __name__=='__main__':
sin = codecs.getreader('utf-8')(sys.stdin)
sout = codecs.getwriter('utf-8')(sys.stdout)
it = iterdir('.',4)
for x in it:
p = os.path.basename(x)
if p == 'body.txt':
print x
f = open(x,'r')
try:
out = StringIO.StringIO()
conv(f,out)
out.seek(0)
f.close()
f = open(x, 'w')
f.write(out.read())
finally:
f.close()
| mit |
fabian4/trove | trove/guestagent/strategies/backup/experimental/mongo_impl.py | 1 | 4132 | # Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.strategies.backup import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MONGODB_DBPATH = CONF.mongodb.mount_point
MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump"
LARGE_TIMEOUT = 1200
class MongoDump(base.BackupRunner):
"""Implementation of Backup Strategy for MongoDump."""
__strategy_name__ = 'mongodump'
backup_cmd = 'mongodump --out ' + MONGO_DUMP_DIR
def __init__(self, *args, **kwargs):
self.app = mongo_service.MongoDBApp()
super(MongoDump, self).__init__(*args, **kwargs)
def _run_pre_backup(self):
"""Create archival contents in dump dir"""
try:
est_dump_size = self.estimate_dump_size()
avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH)
if est_dump_size > avail:
self.cleanup()
# TODO(atomic77) Though we can fully recover from this error
# BackupRunner will leave the trove instance in a BACKUP state
raise OSError(_("Need more free space to run mongodump, "
"estimated %(est_dump_size)s"
" and found %(avail)s bytes free ") %
{'est_dump_size': est_dump_size,
'avail': avail})
operating_system.create_directory(MONGO_DUMP_DIR, as_root=True)
operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER,
"nogroup", as_root=True)
# high timeout here since mongodump can take a long time
utils.execute_with_timeout(
'mongodump', '--out', MONGO_DUMP_DIR,
*(self.app.admin_cmd_auth_params()),
run_as_root=True, root_helper='sudo',
timeout=LARGE_TIMEOUT
)
except exception.ProcessExecutionError as e:
LOG.debug("Caught exception when creating the dump")
self.cleanup()
raise e
@property
def cmd(self):
"""Tars and streams the dump dir contents to
the stdout
"""
cmd = 'sudo tar cPf - ' + MONGO_DUMP_DIR
return cmd + self.zip_cmd + self.encrypt_cmd
def cleanup(self):
operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True)
def _run_post_backup(self):
self.cleanup()
def estimate_dump_size(self):
"""
Estimate the space that the mongodump will take based on the output of
db.stats().dataSize. This seems to be conservative, as the actual bson
output in many cases is a fair bit smaller.
"""
dbs = self.app.list_all_dbs()
# mongodump does not dump the content of the local database
dbs.remove('local')
dbstats = dict([(d, 0) for d in dbs])
for d in dbstats:
dbstats[d] = self.app.db_data_size(d)
LOG.debug("Estimated size for databases: " + str(dbstats))
return sum(dbstats.values())
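# Hypothetical standalone sketch (BackupRunner's constructor signature lives
# in base.py, so the argument below is an assumption):
#
#   runner = MongoDump('backup-001')
#   print(runner.cmd)   # 'sudo tar cPf - <mount_point>/dump' + zip + encrypt parts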
| apache-2.0 |
eciis/web | backend/handlers/resend_invite_handler.py | 1 | 1157 | # -*- coding: utf-8 -*-
"""Resend Invite Handler."""
import json
from util import login_required
from utils import json_response
from utils import Utils
from custom_exceptions import NotAuthorizedException
from . import BaseHandler
from google.appengine.ext import ndb
__all__ = ['ResendInviteHandler']
class ResendInviteHandler(BaseHandler):
"""Resend Invite Handler."""
@json_response
@login_required
def post(self, user, invite_key):
"""Handle POST Requests."""
        body = json.loads(self.request.body)  # decoded but unused below; the call validates that the payload is JSON
host = self.request.host
invite = ndb.Key(urlsafe=invite_key).get()
Utils._assert(invite.status != 'sent',
"The invite has already been used", NotAuthorizedException)
user.check_permission("invite_members",
"User is not allowed to send invites",
invite.institution_key.urlsafe())
institution = invite.institution_key.get()
Utils._assert(not institution.is_active(),
"This institution is not active", NotAuthorizedException)
invite.send_invite(host, user.current_institution)
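# Judging from the call sites above, Utils._assert(condition, message, exc)
# raises exc(message) when condition is True, e.g.:
#
#   Utils._assert(invite.status != 'sent',
#                 "The invite has already been used", NotAuthorizedException)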
| gpl-3.0 |
marcusmartins/compose | compose/cli/verbose_proxy.py | 67 | 1691 |
import functools
from itertools import chain
import logging
import pprint
import six
def format_call(args, kwargs):
args = (repr(a) for a in args)
kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs))
return "({0})".format(", ".join(chain(args, kwargs)))
def format_return(result, max_lines):
if isinstance(result, (list, tuple, set)):
return "({0} with {1} items)".format(type(result).__name__, len(result))
if result:
lines = pprint.pformat(result).split('\n')
extra = '\n...' if len(lines) > max_lines else ''
return '\n'.join(lines[:max_lines]) + extra
return result
class VerboseProxy(object):
"""Proxy all function calls to another class and log method name, arguments
and return values for each call.
"""
def __init__(self, obj_name, obj, log_name=None, max_lines=10):
self.obj_name = obj_name
self.obj = obj
self.max_lines = max_lines
self.log = logging.getLogger(log_name or __name__)
def __getattr__(self, name):
attr = getattr(self.obj, name)
if not six.callable(attr):
return attr
return functools.partial(self.proxy_callable, name)
def proxy_callable(self, call_name, *args, **kwargs):
self.log.info("%s %s <- %s",
self.obj_name,
call_name,
format_call(args, kwargs))
result = getattr(self.obj, call_name)(*args, **kwargs)
self.log.info("%s %s -> %s",
self.obj_name,
call_name,
format_return(result, self.max_lines))
return result
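# Sketch of intended use (the wrapped docker client here is an assumption):
#
#   client = VerboseProxy('docker', docker_client, max_lines=5)
#   client.containers(all=True)
#   # logs: "docker containers <- (all=True)"
#   #       "docker containers -> (list with 3 items)"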
| apache-2.0 |
ritchyteam/odoo | addons/purchase/wizard/purchase_line_invoice.py | 205 | 5419 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class purchase_line_invoice(osv.osv_memory):
""" To create invoice for purchase order line"""
_name = 'purchase.order.line_invoice'
_description = 'Purchase Order Line Make Invoice'
def makeInvoices(self, cr, uid, ids, context=None):
"""
To get Purchase Order line and create Invoice
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return : action window listing the created supplier invoices
"""
if context is None:
context={}
record_ids = context.get('active_ids',[])
if record_ids:
res = False
invoices = {}
invoice_obj = self.pool.get('account.invoice')
purchase_obj = self.pool.get('purchase.order')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
account_jrnl_obj = self.pool.get('account.journal')
def multiple_order_invoice_notes(orders):
notes = ""
for order in orders:
notes += "%s \n" % order.notes
return notes
def make_invoice_by_partner(partner, orders, lines_ids):
"""
create a new invoice for one supplier
@param partner : The object partner
@param orders : The set of orders to add in the invoice
@param lines : The list of line's id
"""
name = orders and orders[0].name or ''
journal_id = account_jrnl_obj.search(cr, uid, [('type', '=', 'purchase')], context=None)
journal_id = journal_id and journal_id[0] or False
a = partner.property_account_payable.id
inv = {
'name': name,
'origin': name,
'type': 'in_invoice',
'journal_id':journal_id,
'reference' : partner.ref,
'account_id': a,
'partner_id': partner.id,
'invoice_line': [(6,0,lines_ids)],
'currency_id' : orders[0].currency_id.id,
'comment': multiple_order_invoice_notes(orders),
'payment_term': orders[0].payment_term_id.id,
'fiscal_position': partner.property_account_position.id
}
inv_id = invoice_obj.create(cr, uid, inv)
for order in orders:
order.write({'invoice_ids': [(4, inv_id)]})
return inv_id
for line in purchase_line_obj.browse(cr, uid, record_ids, context=context):
if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
if not line.partner_id.id in invoices:
invoices[line.partner_id.id] = []
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, line, context=context)
inv_line_data.update({'origin': line.order_id.name})
inv_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
purchase_line_obj.write(cr, uid, [line.id], {'invoiced': True, 'invoice_lines': [(4, inv_id)]})
invoices[line.partner_id.id].append((line,inv_id))
res = []
for result in invoices.values():
il = map(lambda x: x[1], result)
orders = list(set(map(lambda x : x[0].order_id, result)))
res.append(make_invoice_by_partner(orders[0].partner_id, orders, il))
return {
'domain': "[('id','in', ["+','.join(map(str,res))+"])]",
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window'
}
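# Reminder on the one2many/many2many write commands used above:
#   (4, id)     -> link an existing record with the given id
#   (6, 0, ids) -> replace the whole set with the records in ids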
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
runefriborg/pycsp | test/unix/multiprocesstest.py | 1 | 7496 | """
Copyright (c) 2009 John Markus Bjoerndalen <[email protected]>,
Brian Vinter <[email protected]>, Rune M. Friborg <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software. THE
SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
sys.path.insert(0, "../..")
from pycsp.parallel import *
import check
import time
import random
@choice
def action(assertCheck, id, channel_input=None):
if assertCheck:
assertCheck(id)
@multiprocess
def reader(cin, id, sleeper, assertCheck=None):
while True:
if sleeper: sleeper()
got = cin()
if assertCheck:
assertCheck(id)
@multiprocess
def writer(cout, id, cnt, sleeper):
for i in range(cnt):
if sleeper: sleeper()
cout((id, i))
retire(cout)
@multiprocess
def par_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):
while True:
if sleeper: sleeper()
AltSelect(
InputGuard(cin1, action(assertCheck, 0)),
InputGuard(cin2, action(assertCheck, 1)),
InputGuard(cin3, action(assertCheck, 2)),
InputGuard(cin4, action(assertCheck, 3))
)
@multiprocess
def par_fair_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):
while True:
if sleeper: sleeper()
FairSelect(
InputGuard(cin1, action(assertCheck, 0)),
InputGuard(cin2, action(assertCheck, 1)),
InputGuard(cin3, action(assertCheck, 2)),
InputGuard(cin4, action(assertCheck, 3))
)
@multiprocess
def par_pri_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):
while True:
if sleeper: sleeper()
PriSelect(
InputGuard(cin1, action(assertCheck, 0)),
InputGuard(cin2, action(assertCheck, 1)),
InputGuard(cin3, action(assertCheck, 2)),
InputGuard(cin4, action(assertCheck, 3))
)
@multiprocess
def return_msg(cin, sleeper):
if sleeper: sleeper()
return cin()
@io
def sleep_one():
time.sleep(0.01)
@io
def sleep_random():
time.sleep(random.random()/100)
def Parallel_Test(sleeper):
c1=Channel()
L= Parallel(writer(c1.writer(), 0, 10, sleeper), 10 * return_msg(c1.reader(), sleeper))
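    # Parallel() returns one result per process: the writer's body returns
    # None, while each of the ten return_msg processes returns a message.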
if L and len(L) == 11 and L[0] == None and not None in L[1:]:
print(("OK - MultiProcess_Parallel_Test"+str(sleeper)))
else:
print(("Error - MultiProcess_Parallel_Test"+str(sleeper)))
print((str(L)))
def Sequence_Test(sleeper):
c1=Channel()
Spawn(writer(c1.writer(), 0, 10, sleeper))
L= Sequence(10 * return_msg(c1.reader(), sleeper))
if L and len(L) == 10 and not None in L:
print(("OK - MultiProcess_Sequence_Test"+str(sleeper)))
else:
print(("Error - MultiProcess_Sequence_Test"+str(sleeper)))
print((str(L)))
def One2One_Test(read_sleeper, write_sleeper):
x = Channel()
Spawn(check.Assert(x.reader(), "MultiProcess_One2One_Test"+str(read_sleeper)+str(write_sleeper), count=10, vocabulary=[0]))
c1=Channel()
Parallel(reader(c1.reader(), 0 , read_sleeper, x.writer()), writer(c1.writer(),1,10, write_sleeper))
def Any2One_Alting_Test(read_sleeper, write_sleeper):
x = Channel()
Spawn(check.Assert(x.reader(), "MultiProcess_Any2One_Alting_Test"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))
c1=Channel()
c2=Channel()
c3=Channel()
c4=Channel()
cnt = 10
Parallel(par_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),
writer(c1.writer(),0,cnt, write_sleeper),
writer(c2.writer(),1,cnt, write_sleeper),
writer(c3.writer(),2,cnt, write_sleeper),
writer(c4.writer(),3,cnt, write_sleeper))
def Any2One_FairAlting_Test(read_sleeper, write_sleeper):
x = Channel()
Spawn(check.Assert(x.reader(), "MultiProcess_Any2One_FairAlting_Test"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))
c1=Channel()
c2=Channel()
c3=Channel()
c4=Channel()
cnt = 10
Parallel(par_fair_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),
writer(c1.writer(),0,cnt, write_sleeper),
writer(c2.writer(),1,cnt, write_sleeper),
writer(c3.writer(),2,cnt, write_sleeper),
writer(c4.writer(),3,cnt, write_sleeper))
def Any2One_PriAlting_Test(read_sleeper, write_sleeper):
x = Channel()
Spawn(check.Assert(x.reader(), "MultiProcess_Any2One_PriAlting_Test"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))
c1=Channel()
c2=Channel()
c3=Channel()
c4=Channel()
cnt = 10
Parallel(par_pri_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),
writer(c1.writer(),0,cnt, write_sleeper),
writer(c2.writer(),1,cnt, write_sleeper),
writer(c3.writer(),2,cnt, write_sleeper),
writer(c4.writer(),3,cnt, write_sleeper))
def Any2Any_Test(read_sleeper, write_sleeper):
x = Channel()
Spawn(check.Assert(x.reader(), "MultiProcess_Any2Any_Test"+str(read_sleeper)+str(write_sleeper), count=40, vocabulary=[0,1,2,3]))
c1=Channel()
cnt = 10
Parallel(reader(c1.reader(),0, read_sleeper, x.writer()), writer(c1.writer(),0,cnt, write_sleeper),
reader(c1.reader(),1, read_sleeper, x.writer()), writer(c1.writer(),1,cnt, write_sleeper),
reader(c1.reader(),2, read_sleeper, x.writer()), writer(c1.writer(),2,cnt, write_sleeper),
reader(c1.reader(),3, read_sleeper, x.writer()), writer(c1.writer(),3,cnt, write_sleeper))
def autotest():
for read_sleep in [('Zero', None), ('One',sleep_one), ('Random',sleep_random)]:
Sequence_Test(read_sleep[1])
Parallel_Test(read_sleep[1])
for write_sleep in [('Zero', None), ('One',sleep_one), ('Random',sleep_random)]:
rname, rsleep = read_sleep
wname, wsleep = write_sleep
if not rsleep==wsleep==sleep_one:
One2One_Test(rsleep, wsleep)
Any2One_Alting_Test(rsleep, wsleep)
Any2One_FairAlting_Test(rsleep, wsleep)
Any2One_PriAlting_Test(rsleep, wsleep)
Any2Any_Test(rsleep, wsleep)
if __name__ == '__main__':
autotest()
shutdown()
| mit |
alvarolopez/nova | nova/cmd/network.py | 27 | 2415 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Network."""
import sys
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
import nova.db.api
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
def block_db_access():
class NoDB(object):
def __getattr__(self, attr):
return self
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG = logging.getLogger('nova.network')
LOG.error(_LE('No db access allowed in nova-network: %s'),
stacktrace)
raise exception.DBNotAllowed('nova-network')
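    # NoDB.__getattr__ returns the instance itself, so any attribute chain
    # reached through nova.db.api.IMPL ends in __call__, which logs the
    # offending stack trace and raises DBNotAllowed.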
nova.db.api.IMPL = NoDB()
def main():
config.parse_args(sys.argv)
logging.setup(CONF, "nova")
utils.monkey_patch()
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
if not CONF.conductor.use_local:
block_db_access()
objects_base.NovaObject.indirection_api = \
conductor_rpcapi.ConductorAPI()
server = service.Service.create(binary='nova-network',
topic=CONF.network_topic,
db_allowed=CONF.conductor.use_local)
service.serve(server)
service.wait()
| apache-2.0 |
quinot/ansible-modules-core | cloud/amazon/iam_cert.py | 20 | 11703 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
      - Whether to create or delete the certificate. When present is specified, it will attempt an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
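
# Rename an existing certificate. Note: cert and key are re-read from disk
# whenever state=present, so they must still be supplied for a rename.
tasks:
- name: Rename Certificate
  iam_cert:
    name: very_ssl
    new_name: very_ssl_2015
    state: present
    cert: somecert.pem
    key: privcertkey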
'''
import json
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
    # fetch the certificate once instead of five separate IAM round-trips
    cert = iam.get_server_certificate(name).get_server_certificate_result.\
           server_certificate
    meta = cert.server_certificate_metadata
    opath = meta.path
    ocert = cert.certificate_body
    ocert_id = meta.server_certificate_id
    upload_date = meta.upload_date
    exp = meta.expiration
    return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
                except ValueError:  # list.index raises ValueError, not NameError, when absent
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
                    module.fail_json(changed=False, msg='A cert with the name %s already exists and'
                                     ' has a different certificate body associated'
                                     ' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
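# Outcome summary for dup_check (derived from the branches above):
#   same name, same body      -> update=True (metadata-only change)
#   same name, different body -> fail_json (names must be unique)
#   new entry, body already uploaded under another name
#                             -> fail_json unless dup_ok is set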
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
            module.exit_json(changed=changed, msg='Certificate with the name %s is already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
key=cert=chain=None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
gnowxilef/Wox | PythonHome/Lib/site-packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers <64 are certainly valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
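# Illustrative outline of how chardet's single-byte prober consumes these
# tables (the real logic lives in sbcharsetprober.py; names are paraphrased):
#
#   order = charToOrderMap[byte]              # byte -> frequency rank
#   if order < 64 and last_order < 64:        # both among the top-64 letters
#       bucket = precedenceMatrix[last_order * 64 + order]
#       seq_counters[bucket] += 1             # 0-3 likelihood bucket
#   last_order = order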
# flake8: noqa
| mit |
huijunwu/heron | heron/shell/src/python/handlers/pmaphandler.py | 5 | 1352 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' pmaphandler.py '''
import json
import tornado.web
from heron.shell.src.python import utils
class PmapHandler(tornado.web.RequestHandler):
"""
Responsible for reporting memory map of a process given its pid.
"""
# pylint: disable=attribute-defined-outside-init
@tornado.web.asynchronous
def get(self, pid):
''' get method '''
body = utils.str_cmd(['pmap', '-pXX', pid], None, None)
    self.set_header("Content-Type", "application/json")  # actually sets the response header
self.write(json.dumps(body))
self.finish()
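# Example request (the port and route prefix are assumptions; routes are
# registered elsewhere in heron-shell):
#
#   curl http://host:port/pmap/12345
#   -> JSON-encoded string containing the `pmap -pXX 12345` output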
| apache-2.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pygments/lexers/_vim_builtins.py | 31 | 57090 | # -*- coding: utf-8 -*-
"""
pygments.lexers._vim_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_vimkw.py
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Split up in multiple functions so it's importable by jython, which has a
# per-method size limit.
def _getauto():
var = (
('BufAdd','BufAdd'),
('BufCreate','BufCreate'),
('BufDelete','BufDelete'),
('BufEnter','BufEnter'),
('BufFilePost','BufFilePost'),
('BufFilePre','BufFilePre'),
('BufHidden','BufHidden'),
('BufLeave','BufLeave'),
('BufNew','BufNew'),
('BufNewFile','BufNewFile'),
('BufRead','BufRead'),
('BufReadCmd','BufReadCmd'),
('BufReadPost','BufReadPost'),
('BufReadPre','BufReadPre'),
('BufUnload','BufUnload'),
('BufWinEnter','BufWinEnter'),
('BufWinLeave','BufWinLeave'),
('BufWipeout','BufWipeout'),
('BufWrite','BufWrite'),
('BufWriteCmd','BufWriteCmd'),
('BufWritePost','BufWritePost'),
('BufWritePre','BufWritePre'),
('Cmd','Cmd'),
('CmdwinEnter','CmdwinEnter'),
('CmdwinLeave','CmdwinLeave'),
('ColorScheme','ColorScheme'),
('CompleteDone','CompleteDone'),
('CursorHold','CursorHold'),
('CursorHoldI','CursorHoldI'),
('CursorMoved','CursorMoved'),
('CursorMovedI','CursorMovedI'),
('EncodingChanged','EncodingChanged'),
('FileAppendCmd','FileAppendCmd'),
('FileAppendPost','FileAppendPost'),
('FileAppendPre','FileAppendPre'),
('FileChangedRO','FileChangedRO'),
('FileChangedShell','FileChangedShell'),
('FileChangedShellPost','FileChangedShellPost'),
('FileEncoding','FileEncoding'),
('FileReadCmd','FileReadCmd'),
('FileReadPost','FileReadPost'),
('FileReadPre','FileReadPre'),
('FileType','FileType'),
('FileWriteCmd','FileWriteCmd'),
('FileWritePost','FileWritePost'),
('FileWritePre','FileWritePre'),
('FilterReadPost','FilterReadPost'),
('FilterReadPre','FilterReadPre'),
('FilterWritePost','FilterWritePost'),
('FilterWritePre','FilterWritePre'),
('FocusGained','FocusGained'),
('FocusLost','FocusLost'),
('FuncUndefined','FuncUndefined'),
('GUIEnter','GUIEnter'),
('GUIFailed','GUIFailed'),
('InsertChange','InsertChange'),
('InsertCharPre','InsertCharPre'),
('InsertEnter','InsertEnter'),
('InsertLeave','InsertLeave'),
('MenuPopup','MenuPopup'),
('QuickFixCmdPost','QuickFixCmdPost'),
('QuickFixCmdPre','QuickFixCmdPre'),
('QuitPre','QuitPre'),
('RemoteReply','RemoteReply'),
('SessionLoadPost','SessionLoadPost'),
('ShellCmdPost','ShellCmdPost'),
('ShellFilterPost','ShellFilterPost'),
('SourceCmd','SourceCmd'),
('SourcePre','SourcePre'),
('SpellFileMissing','SpellFileMissing'),
('StdinReadPost','StdinReadPost'),
('StdinReadPre','StdinReadPre'),
('SwapExists','SwapExists'),
('Syntax','Syntax'),
('TabEnter','TabEnter'),
('TabLeave','TabLeave'),
('TermChanged','TermChanged'),
('TermResponse','TermResponse'),
('TextChanged','TextChanged'),
('TextChangedI','TextChangedI'),
('User','User'),
('UserGettingBored','UserGettingBored'),
('VimEnter','VimEnter'),
('VimLeave','VimLeave'),
('VimLeavePre','VimLeavePre'),
('VimResized','VimResized'),
('WinEnter','WinEnter'),
('WinLeave','WinLeave'),
('event','event'),
)
return var
auto = _getauto()
def _getcommand():
var = (
('a','a'),
('ab','ab'),
('abc','abclear'),
('abo','aboveleft'),
('al','all'),
('ar','ar'),
('ar','args'),
('arga','argadd'),
('argd','argdelete'),
('argdo','argdo'),
('arge','argedit'),
('argg','argglobal'),
('argl','arglocal'),
('argu','argument'),
('as','ascii'),
('au','au'),
('b','buffer'),
('bN','bNext'),
('ba','ball'),
('bad','badd'),
('bd','bdelete'),
('bel','belowright'),
('bf','bfirst'),
('bl','blast'),
('bm','bmodified'),
('bn','bnext'),
('bo','botright'),
('bp','bprevious'),
('br','br'),
('br','brewind'),
('brea','break'),
('breaka','breakadd'),
('breakd','breakdel'),
('breakl','breaklist'),
('bro','browse'),
('bu','bu'),
('buf','buf'),
('bufdo','bufdo'),
('buffers','buffers'),
('bun','bunload'),
('bw','bwipeout'),
('c','c'),
('c','change'),
('cN','cN'),
('cN','cNext'),
('cNf','cNf'),
('cNf','cNfile'),
('cabc','cabclear'),
('cad','cad'),
('cad','caddexpr'),
('caddb','caddbuffer'),
('caddf','caddfile'),
('cal','call'),
('cat','catch'),
('cb','cbuffer'),
('cc','cc'),
('ccl','cclose'),
('cd','cd'),
('ce','center'),
('cex','cexpr'),
('cf','cfile'),
('cfir','cfirst'),
('cg','cgetfile'),
('cgetb','cgetbuffer'),
('cgete','cgetexpr'),
('changes','changes'),
('chd','chdir'),
('che','checkpath'),
('checkt','checktime'),
('cl','cl'),
('cl','clist'),
('cla','clast'),
('clo','close'),
('cmapc','cmapclear'),
('cn','cn'),
('cn','cnext'),
('cnew','cnewer'),
('cnf','cnf'),
('cnf','cnfile'),
('co','copy'),
('col','colder'),
('colo','colorscheme'),
('com','com'),
('comc','comclear'),
('comp','compiler'),
('con','con'),
('con','continue'),
('conf','confirm'),
('cope','copen'),
('cp','cprevious'),
('cpf','cpfile'),
('cq','cquit'),
('cr','crewind'),
('cs','cs'),
('cscope','cscope'),
('cstag','cstag'),
('cuna','cunabbrev'),
('cw','cwindow'),
('d','d'),
('d','delete'),
('de','de'),
('debug','debug'),
('debugg','debuggreedy'),
('del','del'),
('delc','delcommand'),
('delel','delel'),
('delep','delep'),
('deletel','deletel'),
('deletep','deletep'),
('deletl','deletl'),
('deletp','deletp'),
('delf','delf'),
('delf','delfunction'),
('dell','dell'),
('delm','delmarks'),
('delp','delp'),
('dep','dep'),
('di','di'),
('di','display'),
('diffg','diffget'),
('diffo','diffoff'),
('diffp','diffpatch'),
('diffpu','diffput'),
('diffs','diffsplit'),
('difft','diffthis'),
('diffu','diffupdate'),
('dig','dig'),
('dig','digraphs'),
('dir','dir'),
('dj','djump'),
('dl','dl'),
('dli','dlist'),
('do','do'),
('doau','doau'),
('dp','dp'),
('dr','drop'),
('ds','dsearch'),
('dsp','dsplit'),
('e','e'),
('e','edit'),
('ea','ea'),
('earlier','earlier'),
('ec','ec'),
('echoe','echoerr'),
('echom','echomsg'),
('echon','echon'),
('el','else'),
('elsei','elseif'),
('em','emenu'),
('en','en'),
('en','endif'),
('endf','endf'),
('endf','endfunction'),
('endfo','endfor'),
('endfun','endfun'),
('endt','endtry'),
('endw','endwhile'),
('ene','enew'),
('ex','ex'),
('exi','exit'),
('exu','exusage'),
('f','f'),
('f','file'),
('files','files'),
('filet','filet'),
('filetype','filetype'),
('fin','fin'),
('fin','find'),
('fina','finally'),
('fini','finish'),
('fir','first'),
('fix','fixdel'),
('fo','fold'),
('foldc','foldclose'),
('foldd','folddoopen'),
('folddoc','folddoclosed'),
('foldo','foldopen'),
('for','for'),
('fu','fu'),
('fu','function'),
('fun','fun'),
('g','g'),
('go','goto'),
('gr','grep'),
('grepa','grepadd'),
('gui','gui'),
('gvim','gvim'),
('h','h'),
('h','help'),
('ha','hardcopy'),
('helpf','helpfind'),
('helpg','helpgrep'),
('helpt','helptags'),
('hi','hi'),
('hid','hide'),
('his','history'),
('i','i'),
('ia','ia'),
('iabc','iabclear'),
('if','if'),
('ij','ijump'),
('il','ilist'),
('imapc','imapclear'),
('in','in'),
('intro','intro'),
('is','isearch'),
('isp','isplit'),
('iuna','iunabbrev'),
('j','join'),
('ju','jumps'),
('k','k'),
('kee','keepmarks'),
('keepa','keepa'),
('keepalt','keepalt'),
('keepj','keepjumps'),
('keepp','keeppatterns'),
('l','l'),
('l','list'),
('lN','lN'),
('lN','lNext'),
('lNf','lNf'),
('lNf','lNfile'),
('la','la'),
('la','last'),
('lad','lad'),
('lad','laddexpr'),
('laddb','laddbuffer'),
('laddf','laddfile'),
('lan','lan'),
('lan','language'),
('lat','lat'),
('later','later'),
('lb','lbuffer'),
('lc','lcd'),
('lch','lchdir'),
('lcl','lclose'),
('lcs','lcs'),
('lcscope','lcscope'),
('le','left'),
('lefta','leftabove'),
('lex','lexpr'),
('lf','lfile'),
('lfir','lfirst'),
('lg','lgetfile'),
('lgetb','lgetbuffer'),
('lgete','lgetexpr'),
('lgr','lgrep'),
('lgrepa','lgrepadd'),
('lh','lhelpgrep'),
('ll','ll'),
('lla','llast'),
('lli','llist'),
('lmak','lmake'),
('lmapc','lmapclear'),
('lne','lne'),
('lne','lnext'),
('lnew','lnewer'),
('lnf','lnf'),
('lnf','lnfile'),
('lo','lo'),
('lo','loadview'),
('loadk','loadk'),
('loadkeymap','loadkeymap'),
('loc','lockmarks'),
('lockv','lockvar'),
('lol','lolder'),
('lop','lopen'),
('lp','lprevious'),
('lpf','lpfile'),
('lr','lrewind'),
('ls','ls'),
('lt','ltag'),
('lua','lua'),
('luado','luado'),
('luafile','luafile'),
('lv','lvimgrep'),
('lvimgrepa','lvimgrepadd'),
('lw','lwindow'),
('m','move'),
('ma','ma'),
('ma','mark'),
('mak','make'),
('marks','marks'),
('mat','match'),
('menut','menut'),
('menut','menutranslate'),
('mes','mes'),
('messages','messages'),
('mk','mk'),
('mk','mkexrc'),
('mks','mksession'),
('mksp','mkspell'),
('mkv','mkv'),
('mkv','mkvimrc'),
('mkvie','mkview'),
('mo','mo'),
('mod','mode'),
('mz','mz'),
('mz','mzscheme'),
('mzf','mzfile'),
('n','n'),
('n','next'),
('nb','nbkey'),
('nbc','nbclose'),
('nbs','nbstart'),
('ne','ne'),
('new','new'),
('nmapc','nmapclear'),
('noa','noa'),
('noautocmd','noautocmd'),
('noh','nohlsearch'),
('nu','number'),
('o','o'),
('o','open'),
('ol','oldfiles'),
('omapc','omapclear'),
('on','only'),
('opt','options'),
('ownsyntax','ownsyntax'),
('p','p'),
('p','print'),
('pc','pclose'),
('pe','pe'),
('pe','perl'),
('ped','pedit'),
('perld','perldo'),
('po','pop'),
('popu','popu'),
('popu','popup'),
('pp','ppop'),
('pr','pr'),
('pre','preserve'),
('prev','previous'),
('pro','pro'),
('prof','profile'),
('profd','profdel'),
('promptf','promptfind'),
('promptr','promptrepl'),
('ps','psearch'),
('ptN','ptN'),
('ptN','ptNext'),
('pta','ptag'),
('ptf','ptfirst'),
('ptj','ptjump'),
('ptl','ptlast'),
('ptn','ptn'),
('ptn','ptnext'),
('ptp','ptprevious'),
('ptr','ptrewind'),
('pts','ptselect'),
('pu','put'),
('pw','pwd'),
('py','py'),
('py','python'),
('py3','py3'),
('py3','py3'),
('py3do','py3do'),
('pydo','pydo'),
('pyf','pyfile'),
('python3','python3'),
('q','q'),
('q','quit'),
('qa','qall'),
('quita','quitall'),
('r','r'),
('r','read'),
('re','re'),
('rec','recover'),
('red','red'),
('red','redo'),
('redi','redir'),
('redr','redraw'),
('redraws','redrawstatus'),
('reg','registers'),
('res','resize'),
('ret','retab'),
('retu','return'),
('rew','rewind'),
('ri','right'),
('rightb','rightbelow'),
('ru','ru'),
('ru','runtime'),
('rub','ruby'),
('rubyd','rubydo'),
('rubyf','rubyfile'),
('rundo','rundo'),
('rv','rviminfo'),
('sN','sNext'),
('sa','sargument'),
('sal','sall'),
('san','sandbox'),
('sav','saveas'),
('sb','sbuffer'),
('sbN','sbNext'),
('sba','sball'),
('sbf','sbfirst'),
('sbl','sblast'),
('sbm','sbmodified'),
('sbn','sbnext'),
('sbp','sbprevious'),
('sbr','sbrewind'),
('scrip','scrip'),
('scrip','scriptnames'),
('scripte','scriptencoding'),
('scs','scs'),
('scscope','scscope'),
('se','set'),
('setf','setfiletype'),
('setg','setglobal'),
('setl','setlocal'),
('sf','sfind'),
('sfir','sfirst'),
('sh','shell'),
('si','si'),
('sig','sig'),
('sign','sign'),
('sil','silent'),
('sim','simalt'),
('sl','sl'),
('sl','sleep'),
('sla','slast'),
('sm','smagic'),
('sm','smap'),
('sme','sme'),
('smenu','smenu'),
('sn','snext'),
('sni','sniff'),
('sno','snomagic'),
('snoreme','snoreme'),
('snoremenu','snoremenu'),
('so','so'),
('so','source'),
('sor','sort'),
('sp','split'),
('spe','spe'),
('spe','spellgood'),
('spelld','spelldump'),
('spelli','spellinfo'),
('spellr','spellrepall'),
('spellu','spellundo'),
('spellw','spellwrong'),
('spr','sprevious'),
('sre','srewind'),
('st','st'),
('st','stop'),
('sta','stag'),
('star','star'),
('star','startinsert'),
('start','start'),
('startg','startgreplace'),
('startr','startreplace'),
('stj','stjump'),
('stopi','stopinsert'),
('sts','stselect'),
('sun','sunhide'),
('sunme','sunme'),
('sunmenu','sunmenu'),
('sus','suspend'),
('sv','sview'),
('sw','swapname'),
('sy','sy'),
('syn','syn'),
('sync','sync'),
('syncbind','syncbind'),
('syntime','syntime'),
('t','t'),
('tN','tN'),
('tN','tNext'),
('ta','ta'),
('ta','tag'),
('tab','tab'),
('tabN','tabN'),
('tabN','tabNext'),
('tabc','tabclose'),
('tabd','tabdo'),
('tabe','tabedit'),
('tabf','tabfind'),
('tabfir','tabfirst'),
('tabl','tablast'),
('tabm','tabmove'),
('tabn','tabnext'),
('tabnew','tabnew'),
('tabo','tabonly'),
('tabp','tabprevious'),
('tabr','tabrewind'),
('tabs','tabs'),
('tags','tags'),
('tc','tcl'),
('tcld','tcldo'),
('tclf','tclfile'),
('te','tearoff'),
('tf','tfirst'),
('th','throw'),
('tj','tjump'),
('tl','tlast'),
('tm','tm'),
('tm','tmenu'),
('tn','tn'),
('tn','tnext'),
('to','topleft'),
('tp','tprevious'),
('tr','tr'),
('tr','trewind'),
('try','try'),
('ts','tselect'),
('tu','tu'),
('tu','tunmenu'),
('u','u'),
('u','undo'),
('un','un'),
('una','unabbreviate'),
('undoj','undojoin'),
('undol','undolist'),
('unh','unhide'),
('unl','unl'),
('unlo','unlockvar'),
('uns','unsilent'),
('up','update'),
('v','v'),
('ve','ve'),
('ve','version'),
('verb','verbose'),
('vert','vertical'),
('vi','vi'),
('vi','visual'),
('vie','view'),
('vim','vimgrep'),
('vimgrepa','vimgrepadd'),
('viu','viusage'),
('vmapc','vmapclear'),
('vne','vnew'),
('vs','vsplit'),
('w','w'),
('w','write'),
('wN','wNext'),
('wa','wall'),
('wh','while'),
('win','win'),
('win','winsize'),
('winc','wincmd'),
('windo','windo'),
('winp','winpos'),
('wn','wnext'),
('wp','wprevious'),
('wq','wq'),
('wqa','wqall'),
('ws','wsverb'),
('wundo','wundo'),
('wv','wviminfo'),
('x','x'),
('x','xit'),
('xa','xall'),
('xmapc','xmapclear'),
('xme','xme'),
('xmenu','xmenu'),
('xnoreme','xnoreme'),
('xnoremenu','xnoremenu'),
('xunme','xunme'),
('xunmenu','xunmenu'),
('xwininfo','xwininfo'),
('y','yank'),
)
return var
command = _getcommand()
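# Each tuple pairs the shortest accepted abbreviation with the full command
# name; e.g. ('bn','bnext') covers everything from :bn through :bnext.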
def _getoption():
var = (
('acd','acd'),
('ai','ai'),
('akm','akm'),
('al','al'),
('aleph','aleph'),
('allowrevins','allowrevins'),
('altkeymap','altkeymap'),
('ambiwidth','ambiwidth'),
('ambw','ambw'),
('anti','anti'),
('antialias','antialias'),
('ar','ar'),
('arab','arab'),
('arabic','arabic'),
('arabicshape','arabicshape'),
('ari','ari'),
('arshape','arshape'),
('autochdir','autochdir'),
('autoindent','autoindent'),
('autoread','autoread'),
('autowrite','autowrite'),
('autowriteall','autowriteall'),
('aw','aw'),
('awa','awa'),
('background','background'),
('backspace','backspace'),
('backup','backup'),
('backupcopy','backupcopy'),
('backupdir','backupdir'),
('backupext','backupext'),
('backupskip','backupskip'),
('balloondelay','balloondelay'),
('ballooneval','ballooneval'),
('balloonexpr','balloonexpr'),
('bdir','bdir'),
('bdlay','bdlay'),
('beval','beval'),
('bex','bex'),
('bexpr','bexpr'),
('bg','bg'),
('bh','bh'),
('bin','bin'),
('binary','binary'),
('biosk','biosk'),
('bioskey','bioskey'),
('bk','bk'),
('bkc','bkc'),
('bl','bl'),
('bomb','bomb'),
('breakat','breakat'),
('brk','brk'),
('browsedir','browsedir'),
('bs','bs'),
('bsdir','bsdir'),
('bsk','bsk'),
('bt','bt'),
('bufhidden','bufhidden'),
('buflisted','buflisted'),
('buftype','buftype'),
('casemap','casemap'),
('cb','cb'),
('cc','cc'),
('ccv','ccv'),
('cd','cd'),
('cdpath','cdpath'),
('cedit','cedit'),
('cf','cf'),
('cfu','cfu'),
('ch','ch'),
('charconvert','charconvert'),
('ci','ci'),
('cin','cin'),
('cindent','cindent'),
('cink','cink'),
('cinkeys','cinkeys'),
('cino','cino'),
('cinoptions','cinoptions'),
('cinw','cinw'),
('cinwords','cinwords'),
('clipboard','clipboard'),
('cmdheight','cmdheight'),
('cmdwinheight','cmdwinheight'),
('cmp','cmp'),
('cms','cms'),
('co','co'),
('cocu','cocu'),
('cole','cole'),
('colorcolumn','colorcolumn'),
('columns','columns'),
('com','com'),
('comments','comments'),
('commentstring','commentstring'),
('compatible','compatible'),
('complete','complete'),
('completefunc','completefunc'),
('completeopt','completeopt'),
('concealcursor','concealcursor'),
('conceallevel','conceallevel'),
('confirm','confirm'),
('consk','consk'),
('conskey','conskey'),
('copyindent','copyindent'),
('cot','cot'),
('cp','cp'),
('cpo','cpo'),
('cpoptions','cpoptions'),
('cpt','cpt'),
('crb','crb'),
('cryptmethod','cryptmethod'),
('cscopepathcomp','cscopepathcomp'),
('cscopeprg','cscopeprg'),
('cscopequickfix','cscopequickfix'),
('cscoperelative','cscoperelative'),
('cscopetag','cscopetag'),
('cscopetagorder','cscopetagorder'),
('cscopeverbose','cscopeverbose'),
('cspc','cspc'),
('csprg','csprg'),
('csqf','csqf'),
('csre','csre'),
('cst','cst'),
('csto','csto'),
('csverb','csverb'),
('cuc','cuc'),
('cul','cul'),
('cursorbind','cursorbind'),
('cursorcolumn','cursorcolumn'),
('cursorline','cursorline'),
('cwh','cwh'),
('debug','debug'),
('deco','deco'),
('def','def'),
('define','define'),
('delcombine','delcombine'),
('dex','dex'),
('dg','dg'),
('dict','dict'),
('dictionary','dictionary'),
('diff','diff'),
('diffexpr','diffexpr'),
('diffopt','diffopt'),
('digraph','digraph'),
('dip','dip'),
('dir','dir'),
('directory','directory'),
('display','display'),
('dy','dy'),
('ea','ea'),
('ead','ead'),
('eadirection','eadirection'),
('eb','eb'),
('ed','ed'),
('edcompatible','edcompatible'),
('ef','ef'),
('efm','efm'),
('ei','ei'),
('ek','ek'),
('enc','enc'),
('encoding','encoding'),
('endofline','endofline'),
('eol','eol'),
('ep','ep'),
('equalalways','equalalways'),
('equalprg','equalprg'),
('errorbells','errorbells'),
('errorfile','errorfile'),
('errorformat','errorformat'),
('esckeys','esckeys'),
('et','et'),
('eventignore','eventignore'),
('ex','ex'),
('expandtab','expandtab'),
('exrc','exrc'),
('fcl','fcl'),
('fcs','fcs'),
('fdc','fdc'),
('fde','fde'),
('fdi','fdi'),
('fdl','fdl'),
('fdls','fdls'),
('fdm','fdm'),
('fdn','fdn'),
('fdo','fdo'),
('fdt','fdt'),
('fen','fen'),
('fenc','fenc'),
('fencs','fencs'),
('fex','fex'),
('ff','ff'),
('ffs','ffs'),
('fic','fic'),
('fileencoding','fileencoding'),
('fileencodings','fileencodings'),
('fileformat','fileformat'),
('fileformats','fileformats'),
('fileignorecase','fileignorecase'),
('filetype','filetype'),
('fillchars','fillchars'),
('fk','fk'),
('fkmap','fkmap'),
('flp','flp'),
('fml','fml'),
('fmr','fmr'),
('fo','fo'),
('foldclose','foldclose'),
('foldcolumn','foldcolumn'),
('foldenable','foldenable'),
('foldexpr','foldexpr'),
('foldignore','foldignore'),
('foldlevel','foldlevel'),
('foldlevelstart','foldlevelstart'),
('foldmarker','foldmarker'),
('foldmethod','foldmethod'),
('foldminlines','foldminlines'),
('foldnestmax','foldnestmax'),
('foldopen','foldopen'),
('foldtext','foldtext'),
('formatexpr','formatexpr'),
('formatlistpat','formatlistpat'),
('formatoptions','formatoptions'),
('formatprg','formatprg'),
('fp','fp'),
('fs','fs'),
('fsync','fsync'),
('ft','ft'),
('gcr','gcr'),
('gd','gd'),
('gdefault','gdefault'),
('gfm','gfm'),
('gfn','gfn'),
('gfs','gfs'),
('gfw','gfw'),
('ghr','ghr'),
('go','go'),
('gp','gp'),
('grepformat','grepformat'),
('grepprg','grepprg'),
('gtl','gtl'),
('gtt','gtt'),
('guicursor','guicursor'),
('guifont','guifont'),
('guifontset','guifontset'),
('guifontwide','guifontwide'),
('guiheadroom','guiheadroom'),
('guioptions','guioptions'),
('guipty','guipty'),
('guitablabel','guitablabel'),
('guitabtooltip','guitabtooltip'),
('helpfile','helpfile'),
('helpheight','helpheight'),
('helplang','helplang'),
('hf','hf'),
('hh','hh'),
('hi','hi'),
('hid','hid'),
('hidden','hidden'),
('highlight','highlight'),
('history','history'),
('hk','hk'),
('hkmap','hkmap'),
('hkmapp','hkmapp'),
('hkp','hkp'),
('hl','hl'),
('hlg','hlg'),
('hls','hls'),
('hlsearch','hlsearch'),
('ic','ic'),
('icon','icon'),
('iconstring','iconstring'),
('ignorecase','ignorecase'),
('im','im'),
('imactivatefunc','imactivatefunc'),
('imactivatekey','imactivatekey'),
('imaf','imaf'),
('imak','imak'),
('imc','imc'),
('imcmdline','imcmdline'),
('imd','imd'),
('imdisable','imdisable'),
('imi','imi'),
('iminsert','iminsert'),
('ims','ims'),
('imsearch','imsearch'),
('imsf','imsf'),
('imstatusfunc','imstatusfunc'),
('inc','inc'),
('include','include'),
('includeexpr','includeexpr'),
('incsearch','incsearch'),
('inde','inde'),
('indentexpr','indentexpr'),
('indentkeys','indentkeys'),
('indk','indk'),
('inex','inex'),
('inf','inf'),
('infercase','infercase'),
('inoremap','inoremap'),
('insertmode','insertmode'),
('invacd','invacd'),
('invai','invai'),
('invakm','invakm'),
('invallowrevins','invallowrevins'),
('invaltkeymap','invaltkeymap'),
('invanti','invanti'),
('invantialias','invantialias'),
('invar','invar'),
('invarab','invarab'),
('invarabic','invarabic'),
('invarabicshape','invarabicshape'),
('invari','invari'),
('invarshape','invarshape'),
('invautochdir','invautochdir'),
('invautoindent','invautoindent'),
('invautoread','invautoread'),
('invautowrite','invautowrite'),
('invautowriteall','invautowriteall'),
('invaw','invaw'),
('invawa','invawa'),
('invbackup','invbackup'),
('invballooneval','invballooneval'),
('invbeval','invbeval'),
('invbin','invbin'),
('invbinary','invbinary'),
('invbiosk','invbiosk'),
('invbioskey','invbioskey'),
('invbk','invbk'),
('invbl','invbl'),
('invbomb','invbomb'),
('invbuflisted','invbuflisted'),
('invcf','invcf'),
('invci','invci'),
('invcin','invcin'),
('invcindent','invcindent'),
('invcompatible','invcompatible'),
('invconfirm','invconfirm'),
('invconsk','invconsk'),
('invconskey','invconskey'),
('invcopyindent','invcopyindent'),
('invcp','invcp'),
('invcrb','invcrb'),
('invcscoperelative','invcscoperelative'),
('invcscopetag','invcscopetag'),
('invcscopeverbose','invcscopeverbose'),
('invcsre','invcsre'),
('invcst','invcst'),
('invcsverb','invcsverb'),
('invcuc','invcuc'),
('invcul','invcul'),
('invcursorbind','invcursorbind'),
('invcursorcolumn','invcursorcolumn'),
('invcursorline','invcursorline'),
('invdeco','invdeco'),
('invdelcombine','invdelcombine'),
('invdg','invdg'),
('invdiff','invdiff'),
('invdigraph','invdigraph'),
('invea','invea'),
('inveb','inveb'),
('inved','inved'),
('invedcompatible','invedcompatible'),
('invek','invek'),
('invendofline','invendofline'),
('inveol','inveol'),
('invequalalways','invequalalways'),
('inverrorbells','inverrorbells'),
('invesckeys','invesckeys'),
('invet','invet'),
('invex','invex'),
('invexpandtab','invexpandtab'),
('invexrc','invexrc'),
('invfen','invfen'),
('invfic','invfic'),
('invfileignorecase','invfileignorecase'),
('invfk','invfk'),
('invfkmap','invfkmap'),
('invfoldenable','invfoldenable'),
('invgd','invgd'),
('invgdefault','invgdefault'),
('invguipty','invguipty'),
('invhid','invhid'),
('invhidden','invhidden'),
('invhk','invhk'),
('invhkmap','invhkmap'),
('invhkmapp','invhkmapp'),
('invhkp','invhkp'),
('invhls','invhls'),
('invhlsearch','invhlsearch'),
('invic','invic'),
('invicon','invicon'),
('invignorecase','invignorecase'),
('invim','invim'),
('invimc','invimc'),
('invimcmdline','invimcmdline'),
('invimd','invimd'),
('invimdisable','invimdisable'),
('invincsearch','invincsearch'),
('invinf','invinf'),
('invinfercase','invinfercase'),
('invinsertmode','invinsertmode'),
('invis','invis'),
('invjoinspaces','invjoinspaces'),
('invjs','invjs'),
('invlazyredraw','invlazyredraw'),
('invlbr','invlbr'),
('invlinebreak','invlinebreak'),
('invlisp','invlisp'),
('invlist','invlist'),
('invloadplugins','invloadplugins'),
('invlpl','invlpl'),
('invlz','invlz'),
('invma','invma'),
('invmacatsui','invmacatsui'),
('invmagic','invmagic'),
('invmh','invmh'),
('invml','invml'),
('invmod','invmod'),
('invmodeline','invmodeline'),
('invmodifiable','invmodifiable'),
('invmodified','invmodified'),
('invmore','invmore'),
('invmousef','invmousef'),
('invmousefocus','invmousefocus'),
('invmousehide','invmousehide'),
('invnu','invnu'),
('invnumber','invnumber'),
('invodev','invodev'),
('invopendevice','invopendevice'),
('invpaste','invpaste'),
('invpi','invpi'),
('invpreserveindent','invpreserveindent'),
('invpreviewwindow','invpreviewwindow'),
('invprompt','invprompt'),
('invpvw','invpvw'),
('invreadonly','invreadonly'),
('invrelativenumber','invrelativenumber'),
('invremap','invremap'),
('invrestorescreen','invrestorescreen'),
('invrevins','invrevins'),
('invri','invri'),
('invrightleft','invrightleft'),
('invrl','invrl'),
('invrnu','invrnu'),
('invro','invro'),
('invrs','invrs'),
('invru','invru'),
('invruler','invruler'),
('invsb','invsb'),
('invsc','invsc'),
('invscb','invscb'),
('invscrollbind','invscrollbind'),
('invscs','invscs'),
('invsecure','invsecure'),
('invsft','invsft'),
('invshellslash','invshellslash'),
('invshelltemp','invshelltemp'),
('invshiftround','invshiftround'),
('invshortname','invshortname'),
('invshowcmd','invshowcmd'),
('invshowfulltag','invshowfulltag'),
('invshowmatch','invshowmatch'),
('invshowmode','invshowmode'),
('invsi','invsi'),
('invsm','invsm'),
('invsmartcase','invsmartcase'),
('invsmartindent','invsmartindent'),
('invsmarttab','invsmarttab'),
('invsmd','invsmd'),
('invsn','invsn'),
('invsol','invsol'),
('invspell','invspell'),
('invsplitbelow','invsplitbelow'),
('invsplitright','invsplitright'),
('invspr','invspr'),
('invsr','invsr'),
('invssl','invssl'),
('invsta','invsta'),
('invstartofline','invstartofline'),
('invstmp','invstmp'),
('invswapfile','invswapfile'),
('invswf','invswf'),
('invta','invta'),
('invtagbsearch','invtagbsearch'),
('invtagrelative','invtagrelative'),
('invtagstack','invtagstack'),
('invtbi','invtbi'),
('invtbidi','invtbidi'),
('invtbs','invtbs'),
('invtermbidi','invtermbidi'),
('invterse','invterse'),
('invtextauto','invtextauto'),
('invtextmode','invtextmode'),
('invtf','invtf'),
('invtgst','invtgst'),
('invtildeop','invtildeop'),
('invtimeout','invtimeout'),
('invtitle','invtitle'),
('invto','invto'),
('invtop','invtop'),
('invtr','invtr'),
('invttimeout','invttimeout'),
('invttybuiltin','invttybuiltin'),
('invttyfast','invttyfast'),
('invtx','invtx'),
('invudf','invudf'),
('invundofile','invundofile'),
('invvb','invvb'),
('invvisualbell','invvisualbell'),
('invwa','invwa'),
('invwarn','invwarn'),
('invwb','invwb'),
('invweirdinvert','invweirdinvert'),
('invwfh','invwfh'),
('invwfw','invwfw'),
('invwic','invwic'),
('invwildignorecase','invwildignorecase'),
('invwildmenu','invwildmenu'),
('invwinfixheight','invwinfixheight'),
('invwinfixwidth','invwinfixwidth'),
('invwiv','invwiv'),
('invwmnu','invwmnu'),
('invwrap','invwrap'),
('invwrapscan','invwrapscan'),
('invwrite','invwrite'),
('invwriteany','invwriteany'),
('invwritebackup','invwritebackup'),
('invws','invws'),
('is','is'),
('isf','isf'),
('isfname','isfname'),
('isi','isi'),
('isident','isident'),
('isk','isk'),
('iskeyword','iskeyword'),
('isp','isp'),
('isprint','isprint'),
('joinspaces','joinspaces'),
('js','js'),
('key','key'),
('keymap','keymap'),
('keymodel','keymodel'),
('keywordprg','keywordprg'),
('km','km'),
('kmp','kmp'),
('kp','kp'),
('langmap','langmap'),
('langmenu','langmenu'),
('laststatus','laststatus'),
('lazyredraw','lazyredraw'),
('lbr','lbr'),
('lcs','lcs'),
('linebreak','linebreak'),
('lines','lines'),
('linespace','linespace'),
('lisp','lisp'),
('lispwords','lispwords'),
('list','list'),
('listchars','listchars'),
('lm','lm'),
('lmap','lmap'),
('loadplugins','loadplugins'),
('lpl','lpl'),
('ls','ls'),
('lsp','lsp'),
('lw','lw'),
('lz','lz'),
('ma','ma'),
('macatsui','macatsui'),
('magic','magic'),
('makeef','makeef'),
('makeprg','makeprg'),
('mat','mat'),
('matchpairs','matchpairs'),
('matchtime','matchtime'),
('maxcombine','maxcombine'),
('maxfuncdepth','maxfuncdepth'),
('maxmapdepth','maxmapdepth'),
('maxmem','maxmem'),
('maxmempattern','maxmempattern'),
('maxmemtot','maxmemtot'),
('mco','mco'),
('mef','mef'),
('menuitems','menuitems'),
('mfd','mfd'),
('mh','mh'),
('mis','mis'),
('mkspellmem','mkspellmem'),
('ml','ml'),
('mls','mls'),
('mm','mm'),
('mmd','mmd'),
('mmp','mmp'),
('mmt','mmt'),
('mod','mod'),
('modeline','modeline'),
('modelines','modelines'),
('modifiable','modifiable'),
('modified','modified'),
('more','more'),
('mouse','mouse'),
('mousef','mousef'),
('mousefocus','mousefocus'),
('mousehide','mousehide'),
('mousem','mousem'),
('mousemodel','mousemodel'),
('mouses','mouses'),
('mouseshape','mouseshape'),
('mouset','mouset'),
('mousetime','mousetime'),
('mp','mp'),
('mps','mps'),
('msm','msm'),
('mzq','mzq'),
('mzquantum','mzquantum'),
('nf','nf'),
('nnoremap','nnoremap'),
('noacd','noacd'),
('noai','noai'),
('noakm','noakm'),
('noallowrevins','noallowrevins'),
('noaltkeymap','noaltkeymap'),
('noanti','noanti'),
('noantialias','noantialias'),
('noar','noar'),
('noarab','noarab'),
('noarabic','noarabic'),
('noarabicshape','noarabicshape'),
('noari','noari'),
('noarshape','noarshape'),
('noautochdir','noautochdir'),
('noautoindent','noautoindent'),
('noautoread','noautoread'),
('noautowrite','noautowrite'),
('noautowriteall','noautowriteall'),
('noaw','noaw'),
('noawa','noawa'),
('nobackup','nobackup'),
('noballooneval','noballooneval'),
('nobeval','nobeval'),
('nobin','nobin'),
('nobinary','nobinary'),
('nobiosk','nobiosk'),
('nobioskey','nobioskey'),
('nobk','nobk'),
('nobl','nobl'),
('nobomb','nobomb'),
('nobuflisted','nobuflisted'),
('nocf','nocf'),
('noci','noci'),
('nocin','nocin'),
('nocindent','nocindent'),
('nocompatible','nocompatible'),
('noconfirm','noconfirm'),
('noconsk','noconsk'),
('noconskey','noconskey'),
('nocopyindent','nocopyindent'),
('nocp','nocp'),
('nocrb','nocrb'),
('nocscoperelative','nocscoperelative'),
('nocscopetag','nocscopetag'),
('nocscopeverbose','nocscopeverbose'),
('nocsre','nocsre'),
('nocst','nocst'),
('nocsverb','nocsverb'),
('nocuc','nocuc'),
('nocul','nocul'),
('nocursorbind','nocursorbind'),
('nocursorcolumn','nocursorcolumn'),
('nocursorline','nocursorline'),
('nodeco','nodeco'),
('nodelcombine','nodelcombine'),
('nodg','nodg'),
('nodiff','nodiff'),
('nodigraph','nodigraph'),
('noea','noea'),
('noeb','noeb'),
('noed','noed'),
('noedcompatible','noedcompatible'),
('noek','noek'),
('noendofline','noendofline'),
('noeol','noeol'),
('noequalalways','noequalalways'),
('noerrorbells','noerrorbells'),
('noesckeys','noesckeys'),
('noet','noet'),
('noex','noex'),
('noexpandtab','noexpandtab'),
('noexrc','noexrc'),
('nofen','nofen'),
('nofic','nofic'),
('nofileignorecase','nofileignorecase'),
('nofk','nofk'),
('nofkmap','nofkmap'),
('nofoldenable','nofoldenable'),
('nogd','nogd'),
('nogdefault','nogdefault'),
('noguipty','noguipty'),
('nohid','nohid'),
('nohidden','nohidden'),
('nohk','nohk'),
('nohkmap','nohkmap'),
('nohkmapp','nohkmapp'),
('nohkp','nohkp'),
('nohls','nohls'),
('nohlsearch','nohlsearch'),
('noic','noic'),
('noicon','noicon'),
('noignorecase','noignorecase'),
('noim','noim'),
('noimc','noimc'),
('noimcmdline','noimcmdline'),
('noimd','noimd'),
('noimdisable','noimdisable'),
('noincsearch','noincsearch'),
('noinf','noinf'),
('noinfercase','noinfercase'),
('noinsertmode','noinsertmode'),
('nois','nois'),
('nojoinspaces','nojoinspaces'),
('nojs','nojs'),
('nolazyredraw','nolazyredraw'),
('nolbr','nolbr'),
('nolinebreak','nolinebreak'),
('nolisp','nolisp'),
('nolist','nolist'),
('noloadplugins','noloadplugins'),
('nolpl','nolpl'),
('nolz','nolz'),
('noma','noma'),
('nomacatsui','nomacatsui'),
('nomagic','nomagic'),
('nomh','nomh'),
('noml','noml'),
('nomod','nomod'),
('nomodeline','nomodeline'),
('nomodifiable','nomodifiable'),
('nomodified','nomodified'),
('nomore','nomore'),
('nomousef','nomousef'),
('nomousefocus','nomousefocus'),
('nomousehide','nomousehide'),
('nonu','nonu'),
('nonumber','nonumber'),
('noodev','noodev'),
('noopendevice','noopendevice'),
('nopaste','nopaste'),
('nopi','nopi'),
('nopreserveindent','nopreserveindent'),
('nopreviewwindow','nopreviewwindow'),
('noprompt','noprompt'),
('nopvw','nopvw'),
('noreadonly','noreadonly'),
('norelativenumber','norelativenumber'),
('noremap','noremap'),
('norestorescreen','norestorescreen'),
('norevins','norevins'),
('nori','nori'),
('norightleft','norightleft'),
('norl','norl'),
('nornu','nornu'),
('noro','noro'),
('nors','nors'),
('noru','noru'),
('noruler','noruler'),
('nosb','nosb'),
('nosc','nosc'),
('noscb','noscb'),
('noscrollbind','noscrollbind'),
('noscs','noscs'),
('nosecure','nosecure'),
('nosft','nosft'),
('noshellslash','noshellslash'),
('noshelltemp','noshelltemp'),
('noshiftround','noshiftround'),
('noshortname','noshortname'),
('noshowcmd','noshowcmd'),
('noshowfulltag','noshowfulltag'),
('noshowmatch','noshowmatch'),
('noshowmode','noshowmode'),
('nosi','nosi'),
('nosm','nosm'),
('nosmartcase','nosmartcase'),
('nosmartindent','nosmartindent'),
('nosmarttab','nosmarttab'),
('nosmd','nosmd'),
('nosn','nosn'),
('nosol','nosol'),
('nospell','nospell'),
('nosplitbelow','nosplitbelow'),
('nosplitright','nosplitright'),
('nospr','nospr'),
('nosr','nosr'),
('nossl','nossl'),
('nosta','nosta'),
('nostartofline','nostartofline'),
('nostmp','nostmp'),
('noswapfile','noswapfile'),
('noswf','noswf'),
('nota','nota'),
('notagbsearch','notagbsearch'),
('notagrelative','notagrelative'),
('notagstack','notagstack'),
('notbi','notbi'),
('notbidi','notbidi'),
('notbs','notbs'),
('notermbidi','notermbidi'),
('noterse','noterse'),
('notextauto','notextauto'),
('notextmode','notextmode'),
('notf','notf'),
('notgst','notgst'),
('notildeop','notildeop'),
('notimeout','notimeout'),
('notitle','notitle'),
('noto','noto'),
('notop','notop'),
('notr','notr'),
('nottimeout','nottimeout'),
('nottybuiltin','nottybuiltin'),
('nottyfast','nottyfast'),
('notx','notx'),
('noudf','noudf'),
('noundofile','noundofile'),
('novb','novb'),
('novisualbell','novisualbell'),
('nowa','nowa'),
('nowarn','nowarn'),
('nowb','nowb'),
('noweirdinvert','noweirdinvert'),
('nowfh','nowfh'),
('nowfw','nowfw'),
('nowic','nowic'),
('nowildignorecase','nowildignorecase'),
('nowildmenu','nowildmenu'),
('nowinfixheight','nowinfixheight'),
('nowinfixwidth','nowinfixwidth'),
('nowiv','nowiv'),
('nowmnu','nowmnu'),
('nowrap','nowrap'),
('nowrapscan','nowrapscan'),
('nowrite','nowrite'),
('nowriteany','nowriteany'),
('nowritebackup','nowritebackup'),
('nows','nows'),
('nrformats','nrformats'),
('nu','nu'),
('number','number'),
('numberwidth','numberwidth'),
('nuw','nuw'),
('odev','odev'),
('oft','oft'),
('ofu','ofu'),
('omnifunc','omnifunc'),
('opendevice','opendevice'),
('operatorfunc','operatorfunc'),
('opfunc','opfunc'),
('osfiletype','osfiletype'),
('pa','pa'),
('para','para'),
('paragraphs','paragraphs'),
('paste','paste'),
('pastetoggle','pastetoggle'),
('patchexpr','patchexpr'),
('patchmode','patchmode'),
('path','path'),
('pdev','pdev'),
('penc','penc'),
('pex','pex'),
('pexpr','pexpr'),
('pfn','pfn'),
('ph','ph'),
('pheader','pheader'),
('pi','pi'),
('pm','pm'),
('pmbcs','pmbcs'),
('pmbfn','pmbfn'),
('popt','popt'),
('preserveindent','preserveindent'),
('previewheight','previewheight'),
('previewwindow','previewwindow'),
('printdevice','printdevice'),
('printencoding','printencoding'),
('printexpr','printexpr'),
('printfont','printfont'),
('printheader','printheader'),
('printmbcharset','printmbcharset'),
('printmbfont','printmbfont'),
('printoptions','printoptions'),
('prompt','prompt'),
('pt','pt'),
('pumheight','pumheight'),
('pvh','pvh'),
('pvw','pvw'),
('qe','qe'),
('quoteescape','quoteescape'),
('rdt','rdt'),
('re','re'),
('readonly','readonly'),
('redrawtime','redrawtime'),
('regexpengine','regexpengine'),
('relativenumber','relativenumber'),
('remap','remap'),
('report','report'),
('restorescreen','restorescreen'),
('revins','revins'),
('ri','ri'),
('rightleft','rightleft'),
('rightleftcmd','rightleftcmd'),
('rl','rl'),
('rlc','rlc'),
('rnu','rnu'),
('ro','ro'),
('rs','rs'),
('rtp','rtp'),
('ru','ru'),
('ruf','ruf'),
('ruler','ruler'),
('rulerformat','rulerformat'),
('runtimepath','runtimepath'),
('sb','sb'),
('sbo','sbo'),
('sbr','sbr'),
('sc','sc'),
('scb','scb'),
('scr','scr'),
('scroll','scroll'),
('scrollbind','scrollbind'),
('scrolljump','scrolljump'),
('scrolloff','scrolloff'),
('scrollopt','scrollopt'),
('scs','scs'),
('sect','sect'),
('sections','sections'),
('secure','secure'),
('sel','sel'),
('selection','selection'),
('selectmode','selectmode'),
('sessionoptions','sessionoptions'),
('sft','sft'),
('sh','sh'),
('shcf','shcf'),
('shell','shell'),
('shellcmdflag','shellcmdflag'),
('shellpipe','shellpipe'),
('shellquote','shellquote'),
('shellredir','shellredir'),
('shellslash','shellslash'),
('shelltemp','shelltemp'),
('shelltype','shelltype'),
('shellxescape','shellxescape'),
('shellxquote','shellxquote'),
('shiftround','shiftround'),
('shiftwidth','shiftwidth'),
('shm','shm'),
('shortmess','shortmess'),
('shortname','shortname'),
('showbreak','showbreak'),
('showcmd','showcmd'),
('showfulltag','showfulltag'),
('showmatch','showmatch'),
('showmode','showmode'),
('showtabline','showtabline'),
('shq','shq'),
('si','si'),
('sidescroll','sidescroll'),
('sidescrolloff','sidescrolloff'),
('siso','siso'),
('sj','sj'),
('slm','slm'),
('sm','sm'),
('smartcase','smartcase'),
('smartindent','smartindent'),
('smarttab','smarttab'),
('smc','smc'),
('smd','smd'),
('sn','sn'),
('so','so'),
('softtabstop','softtabstop'),
('sol','sol'),
('sp','sp'),
('spc','spc'),
('spell','spell'),
('spellcapcheck','spellcapcheck'),
('spellfile','spellfile'),
('spelllang','spelllang'),
('spellsuggest','spellsuggest'),
('spf','spf'),
('spl','spl'),
('splitbelow','splitbelow'),
('splitright','splitright'),
('spr','spr'),
('sps','sps'),
('sr','sr'),
('srr','srr'),
('ss','ss'),
('ssl','ssl'),
('ssop','ssop'),
('st','st'),
('sta','sta'),
('stal','stal'),
('startofline','startofline'),
('statusline','statusline'),
('stl','stl'),
('stmp','stmp'),
('sts','sts'),
('su','su'),
('sua','sua'),
('suffixes','suffixes'),
('suffixesadd','suffixesadd'),
('sw','sw'),
('swapfile','swapfile'),
('swapsync','swapsync'),
('swb','swb'),
('swf','swf'),
('switchbuf','switchbuf'),
('sws','sws'),
('sxe','sxe'),
('sxq','sxq'),
('syn','syn'),
('synmaxcol','synmaxcol'),
('syntax','syntax'),
('t_AB','t_AB'),
('t_AF','t_AF'),
('t_AL','t_AL'),
('t_CS','t_CS'),
('t_CV','t_CV'),
('t_Ce','t_Ce'),
('t_Co','t_Co'),
('t_Cs','t_Cs'),
('t_DL','t_DL'),
('t_EI','t_EI'),
('t_F1','t_F1'),
('t_F2','t_F2'),
('t_F3','t_F3'),
('t_F4','t_F4'),
('t_F5','t_F5'),
('t_F6','t_F6'),
('t_F7','t_F7'),
('t_F8','t_F8'),
('t_F9','t_F9'),
('t_IE','t_IE'),
('t_IS','t_IS'),
('t_K1','t_K1'),
('t_K3','t_K3'),
('t_K4','t_K4'),
('t_K5','t_K5'),
('t_K6','t_K6'),
('t_K7','t_K7'),
('t_K8','t_K8'),
('t_K9','t_K9'),
('t_KA','t_KA'),
('t_KB','t_KB'),
('t_KC','t_KC'),
('t_KD','t_KD'),
('t_KE','t_KE'),
('t_KF','t_KF'),
('t_KG','t_KG'),
('t_KH','t_KH'),
('t_KI','t_KI'),
('t_KJ','t_KJ'),
('t_KK','t_KK'),
('t_KL','t_KL'),
('t_RI','t_RI'),
('t_RV','t_RV'),
('t_SI','t_SI'),
('t_Sb','t_Sb'),
('t_Sf','t_Sf'),
('t_WP','t_WP'),
('t_WS','t_WS'),
('t_ZH','t_ZH'),
('t_ZR','t_ZR'),
('t_al','t_al'),
('t_bc','t_bc'),
('t_cd','t_cd'),
('t_ce','t_ce'),
('t_cl','t_cl'),
('t_cm','t_cm'),
('t_cs','t_cs'),
('t_da','t_da'),
('t_db','t_db'),
('t_dl','t_dl'),
('t_fs','t_fs'),
('t_k1','t_k1'),
('t_k2','t_k2'),
('t_k3','t_k3'),
('t_k4','t_k4'),
('t_k5','t_k5'),
('t_k6','t_k6'),
('t_k7','t_k7'),
('t_k8','t_k8'),
('t_k9','t_k9'),
('t_kB','t_kB'),
('t_kD','t_kD'),
('t_kI','t_kI'),
('t_kN','t_kN'),
('t_kP','t_kP'),
('t_kb','t_kb'),
('t_kd','t_kd'),
('t_ke','t_ke'),
('t_kh','t_kh'),
('t_kl','t_kl'),
('t_kr','t_kr'),
('t_ks','t_ks'),
('t_ku','t_ku'),
('t_le','t_le'),
('t_mb','t_mb'),
('t_md','t_md'),
('t_me','t_me'),
('t_mr','t_mr'),
('t_ms','t_ms'),
('t_nd','t_nd'),
('t_op','t_op'),
('t_se','t_se'),
('t_so','t_so'),
('t_sr','t_sr'),
('t_te','t_te'),
('t_ti','t_ti'),
('t_ts','t_ts'),
('t_u7','t_u7'),
('t_ue','t_ue'),
('t_us','t_us'),
('t_ut','t_ut'),
('t_vb','t_vb'),
('t_ve','t_ve'),
('t_vi','t_vi'),
('t_vs','t_vs'),
('t_xs','t_xs'),
('ta','ta'),
('tabline','tabline'),
('tabpagemax','tabpagemax'),
('tabstop','tabstop'),
('tag','tag'),
('tagbsearch','tagbsearch'),
('taglength','taglength'),
('tagrelative','tagrelative'),
('tags','tags'),
('tagstack','tagstack'),
('tal','tal'),
('tb','tb'),
('tbi','tbi'),
('tbidi','tbidi'),
('tbis','tbis'),
('tbs','tbs'),
('tenc','tenc'),
('term','term'),
('termbidi','termbidi'),
('termencoding','termencoding'),
('terse','terse'),
('textauto','textauto'),
('textmode','textmode'),
('textwidth','textwidth'),
('tf','tf'),
('tgst','tgst'),
('thesaurus','thesaurus'),
('tildeop','tildeop'),
('timeout','timeout'),
('timeoutlen','timeoutlen'),
('title','title'),
('titlelen','titlelen'),
('titleold','titleold'),
('titlestring','titlestring'),
('tl','tl'),
('tm','tm'),
('to','to'),
('toolbar','toolbar'),
('toolbariconsize','toolbariconsize'),
('top','top'),
('tpm','tpm'),
('tr','tr'),
('ts','ts'),
('tsl','tsl'),
('tsr','tsr'),
('ttimeout','ttimeout'),
('ttimeoutlen','ttimeoutlen'),
('ttm','ttm'),
('tty','tty'),
('ttybuiltin','ttybuiltin'),
('ttyfast','ttyfast'),
('ttym','ttym'),
('ttymouse','ttymouse'),
('ttyscroll','ttyscroll'),
('ttytype','ttytype'),
('tw','tw'),
('tx','tx'),
('uc','uc'),
('udf','udf'),
('udir','udir'),
('ul','ul'),
('undodir','undodir'),
('undofile','undofile'),
('undolevels','undolevels'),
('undoreload','undoreload'),
('updatecount','updatecount'),
('updatetime','updatetime'),
('ur','ur'),
('ut','ut'),
('vb','vb'),
('vbs','vbs'),
('vdir','vdir'),
('ve','ve'),
('verbose','verbose'),
('verbosefile','verbosefile'),
('vfile','vfile'),
('vi','vi'),
('viewdir','viewdir'),
('viewoptions','viewoptions'),
('viminfo','viminfo'),
('virtualedit','virtualedit'),
('visualbell','visualbell'),
('vnoremap','vnoremap'),
('vop','vop'),
('wa','wa'),
('wak','wak'),
('warn','warn'),
('wb','wb'),
('wc','wc'),
('wcm','wcm'),
('wd','wd'),
('weirdinvert','weirdinvert'),
('wfh','wfh'),
('wfw','wfw'),
('wh','wh'),
('whichwrap','whichwrap'),
('wi','wi'),
('wic','wic'),
('wig','wig'),
('wildchar','wildchar'),
('wildcharm','wildcharm'),
('wildignore','wildignore'),
('wildignorecase','wildignorecase'),
('wildmenu','wildmenu'),
('wildmode','wildmode'),
('wildoptions','wildoptions'),
('wim','wim'),
('winaltkeys','winaltkeys'),
('window','window'),
('winfixheight','winfixheight'),
('winfixwidth','winfixwidth'),
('winheight','winheight'),
('winminheight','winminheight'),
('winminwidth','winminwidth'),
('winwidth','winwidth'),
('wiv','wiv'),
('wiw','wiw'),
('wm','wm'),
('wmh','wmh'),
('wmnu','wmnu'),
('wmw','wmw'),
('wop','wop'),
('wrap','wrap'),
('wrapmargin','wrapmargin'),
('wrapscan','wrapscan'),
('write','write'),
('writeany','writeany'),
('writebackup','writebackup'),
('writedelay','writedelay'),
('ws','ws'),
('ww','ww'),
)
return var
option = _getoption()
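# A minimal usage sketch (an assumption about how these tables are consumed,
# e.g. by a Vim-script highlighter; the helper name below is hypothetical).
# Each tuple is (shortest abbreviation, full name): a word denotes a builtin
# when it extends the abbreviation and is itself a prefix of the full name.
def _is_builtin(word, table):
    return any(word.startswith(short) and full.startswith(word)
               for short, full in table)
# _is_builtin('tabnex', command)  # -> True ('tabnex' lies between 'tabn' and 'tabnext')
# _is_builtin('foo', option)      # -> False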
| apache-2.0 |
tudorvio/nova | nova/api/openstack/compute/schemas/v3/image_metadata.py | 95 | 1177 | # Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'metadata': parameter_types.metadata
},
'required': ['metadata'],
'additionalProperties': False,
}
single_metadata = copy.deepcopy(parameter_types.metadata)
single_metadata.update({
'minProperties': 1,
'maxProperties': 1
})
update = {
'type': 'object',
'properties': {
'meta': single_metadata
},
'required': ['meta'],
'additionalProperties': False,
}
update_all = create
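# Illustrative payloads (an assumption for documentation purposes, not part
# of the original module): 'create' accepts any number of metadata items,
# while 'update' requires exactly one key under 'meta'.
#   valid for create:   {'metadata': {'os_type': 'linux', 'arch': 'x86_64'}}
#   valid for update:   {'meta': {'os_type': 'linux'}}
#   invalid for update: {'meta': {}}  # violates minProperties: 1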
| apache-2.0 |
PHOTOX/fuase | ase/ase/dft/pars_beefvdw.py | 7 | 23666 | import numpy as np
"""
BEEF-vdW ensemble matrix
"""
uiOmega = np.array([
[ 9.238289896663336e-02 , 1.573812432079919e-01 , 1.029935738540308e-01 , 1.366003143143216e-02 , -2.170819634832974e-02 , -1.971473025898487e-03 , 6.694499988752175e-03 , -1.436837103528228e-03 , -1.894288263659829e-03 , 1.620730202731354e-03 , 3.342742083591797e-05 , -8.935288190655010e-04 , 5.660396510944252e-04 , 1.092640880494036e-04 , -3.909536572033999e-04 , 2.271387694573118e-04 , 4.720081507064245e-05 , -1.728805247746040e-04 , 1.161095890105822e-04 , 1.632569772443308e-05 , -9.505329207480296e-05 , 5.966357079138161e-05 , 3.909940118293563e-05 , -9.094078397503243e-05 , 3.979403197298154e-05 , 5.883724662690913e-05 , -8.868728142026543e-05 , 1.649195968392651e-05 , 3.986378541237102e-05 , -2.080734204109696e-05 , -5.210020320050114e-02 ],
[ 1.573812432080020e-01 , 3.194503568212250e-01 , 2.330350019456029e-01 , 3.539526885754365e-02 , -4.398162505429017e-02 , -7.870052015456349e-03 , 1.288386845762548e-02 , -1.452985165647521e-03 , -3.414852982913958e-03 , 2.242106483095301e-03 , 2.411666744826487e-04 , -1.065238741066354e-03 , 4.135880276069384e-04 , 2.536775346693924e-04 , -2.530397572915468e-04 , -5.690638693892032e-05 , 1.673844673999724e-04 , -9.944997873568069e-06 , -1.718953440120930e-04 , 1.760399953825598e-04 , -4.156338135631344e-06 , -1.832004402941794e-04 , 2.147464735651294e-04 , -6.193272093284920e-05 , -1.319710553323893e-04 , 1.948452573660156e-04 , -5.101630490846988e-05 , -9.176394513865211e-05 , 4.717722996545362e-05 , 7.111249931485782e-06 , -1.890906559696380e-02 ],
[ 1.029935738540465e-01 , 2.330350019456185e-01 , 1.906771663140688e-01 , 4.596131842244390e-02 , -2.792908137436464e-02 , -1.240232492150593e-02 , 5.672917933168648e-03 , 1.434385697982085e-03 , -9.455904542077782e-04 , 3.036359098459168e-05 , 1.161828188486106e-04 , 7.937359374341367e-05 , -1.452498186750268e-04 , 1.384058476815110e-05 , 1.530299855805981e-04 , -1.908370243275392e-04 , 5.614920168522352e-05 , 1.448595900033545e-04 , -2.366731351667913e-04 , 1.303628937641542e-04 , 8.403491035544659e-05 , -2.162539474930004e-04 , 1.579894933576654e-04 , 1.853443013110853e-05 , -1.453365923716440e-04 , 1.270119640983266e-04 , 1.393651877686879e-05 , -8.735349638010247e-05 , 1.562163815156337e-05 , 1.819382613180743e-05 , 1.382668594717776e-02 ],
[ 1.366003143144247e-02 , 3.539526885755911e-02 , 4.596131842245237e-02 , 3.412600355844948e-02 , 5.788002236623282e-03 , -9.314441356035262e-03 , -5.276305980529734e-03 , 2.351769282262449e-03 , 1.746899840570664e-03 , -1.053810170761046e-03 , -2.902616086744972e-04 , 5.752547360555607e-04 , -8.857003353891879e-05 , -2.395794347875841e-04 , 1.413569388536142e-04 , 5.605747482892052e-05 , -9.488998643296934e-05 , 2.026963310534137e-05 , 3.772638762355388e-05 , -4.067190865485931e-05 , 1.321492117521963e-05 , 1.940880629107831e-05 , -3.480998018498056e-05 , 1.778991053651829e-05 , 1.586887875776044e-05 , -3.017037178432038e-05 , 6.647594986708508e-06 , 1.545376441325688e-05 , -5.578313586587479e-06 , -2.498675358121092e-06 , -7.076421937394695e-03 ],
[ -2.170819634832771e-02 , -4.398162505428508e-02 , -2.792908137435959e-02 , 5.788002236625639e-03 , 1.599472206930952e-02 , 1.608917143245890e-03 , -5.597384471167169e-03 , -1.499164748509191e-03 , 1.031475806000458e-03 , 5.332996506181574e-04 , -2.489713532023827e-04 , -1.029965243518429e-04 , 1.699409468310518e-04 , -5.189717276078564e-05 , -6.126197146900113e-05 , 8.454620554637730e-05 , -2.898403340456230e-05 , -4.695866195676658e-05 , 7.705234549813160e-05 , -3.658438803802928e-05 , -3.319317982415972e-05 , 6.573717163798472e-05 , -3.698152620572900e-05 , -1.629294629181860e-05 , 4.241341573520274e-05 , -2.624727597577873e-05 , -1.229090821564833e-05 , 2.348090332681114e-05 , -2.215657597169080e-07 , -6.444872622959645e-06 , 7.322667111791249e-04 ],
[ -1.971473025900972e-03 , -7.870052015460869e-03 , -1.240232492150907e-02 , -9.314441356035836e-03 , 1.608917143246348e-03 , 7.634754660592785e-03 , 2.015667017611551e-03 , -3.623574339977459e-03 , -1.474755821692741e-03 , 1.127995802260326e-03 , 4.639737083120432e-04 , -4.567637545650261e-04 , -2.016876766012911e-05 , 2.508509815496272e-04 , -1.147671414054848e-04 , -7.415040892571524e-05 , 9.932046149486572e-05 , -1.325820303664777e-05 , -5.028147494244732e-05 , 4.435536803388949e-05 , -2.227553213442618e-06 , -3.139708798837062e-05 , 3.307650446358692e-05 , -6.558671845195734e-06 , -2.123041867524418e-05 , 2.397646436678162e-05 , 9.138618011606733e-07 , -1.527849014454442e-05 , 2.261408120954423e-06 , 3.617283769859004e-06 , 2.325697711871941e-03 ],
[ 6.694499988750638e-03 , 1.288386845762195e-02 , 5.672917933165492e-03 , -5.276305980530938e-03 , -5.597384471167074e-03 , 2.015667017611739e-03 , 4.377508336814056e-03 , 4.100359917331289e-04 , -1.876150671093797e-03 , -7.271917289430953e-04 , 4.632933527994722e-04 , 2.963398987389869e-04 , -1.506945170950558e-04 , -5.149346314745077e-05 , 9.215110292974351e-05 , -3.132804577761338e-05 , -2.100641270393858e-05 , 3.506730172274297e-05 , -2.465494126635098e-05 , 1.240900749825681e-06 , 2.076535734347166e-05 , -2.285062874633954e-05 , 4.208354769194986e-06 , 1.425348474305690e-05 , -1.526811061895161e-05 , 3.047660598079506e-06 , 9.299255727538788e-06 , -8.183025849838069e-06 , -2.016271133614633e-06 , 3.118202698102115e-06 , -1.983005807705875e-03 ],
[ -1.436837103527614e-03 , -1.452985165646303e-03 , 1.434385697983009e-03 , 2.351769282262657e-03 , -1.499164748509336e-03 , -3.623574339977513e-03 , 4.100359917331572e-04 , 3.388139698932502e-03 , 4.194131188659545e-04 , -1.640686728848097e-03 , -4.535159587025243e-04 , 5.155942974268080e-04 , 1.219637950738874e-04 , -1.881362361335498e-04 , 5.406677887798438e-05 , 6.730117550948196e-05 , -6.826604522477651e-05 , -7.600076704978491e-08 , 4.545041141091276e-05 , -3.434406804211548e-05 , -5.396753498031206e-06 , 3.160900890445868e-05 , -2.489184945477622e-05 , -2.480536094745677e-06 , 2.230938441981598e-05 , -1.767486060639981e-05 , -6.845063675872953e-06 , 1.581526117380142e-05 , 2.198506926484949e-07 , -4.837425950871762e-06 , -2.819410239268639e-05 ],
[ -1.894288263659430e-03 , -3.414852982912986e-03 , -9.455904542068480e-04 , 1.746899840571073e-03 , 1.031475806000471e-03 , -1.474755821692797e-03 , -1.876150671093806e-03 , 4.194131188659666e-04 , 2.016821929004358e-03 , 2.913183096117767e-04 , -1.031831612901280e-03 , -3.523961692265613e-04 , 3.020345263188065e-04 , 1.358462914820522e-04 , -1.115872186939481e-04 , 4.093795217439325e-06 , 4.590005891560275e-05 , -2.788695451888706e-05 , -4.445454868386084e-06 , 1.774618276396958e-05 , -1.122137909788981e-05 , -3.231227423595720e-06 , 1.210473810098234e-05 , -7.926468935313864e-06 , -3.432017428898823e-06 , 8.827938351713780e-06 , -2.192391060027345e-06 , -4.171466247118773e-06 , 1.331053824099077e-06 , 8.121122753847691e-07 , 1.468573793837378e-03 ],
[ 1.620730202730968e-03 , 2.242106483094428e-03 , 3.036359098381830e-05 , -1.053810170761330e-03 , 5.332996506181955e-04 , 1.127995802260379e-03 , -7.271917289430953e-04 , -1.640686728848104e-03 , 2.913183096117794e-04 , 1.618640260028683e-03 , 1.578833514403573e-04 , -8.684832913376226e-04 , -1.835212360942493e-04 , 2.681276727854413e-04 , 3.285354767345348e-05 , -7.506050741939204e-05 , 4.030911032027864e-05 , 1.270499721233960e-05 , -3.550009040339185e-05 , 2.093845130027192e-05 , 6.936412133339431e-06 , -2.092061019101916e-05 , 1.263627438389547e-05 , 5.132905197400893e-06 , -1.410173385828192e-05 , 8.068421998377687e-06 , 6.590533164499491e-06 , -9.628875957888051e-06 , -1.186884523575427e-06 , 3.379003341108947e-06 , -1.318935000558665e-03 ],
[ 3.342742083582248e-05 , 2.411666744824321e-04 , 1.161828188484188e-04 , -2.902616086745682e-04 , -2.489713532023758e-04 , 4.639737083120528e-04 , 4.632933527994702e-04 , -4.535159587025258e-04 , -1.031831612901280e-03 , 1.578833514403571e-04 , 1.126887798536041e-03 , 1.596306400901984e-04 , -6.262219982793480e-04 , -1.832949555936158e-04 , 2.062011811517906e-04 , 5.639579837834072e-05 , -7.429445085205222e-05 , 1.947674856272851e-05 , 2.925850101283131e-05 , -3.392404367734551e-05 , 7.606268115327377e-06 , 1.774935646371143e-05 , -2.076809415497982e-05 , 3.678275105655822e-06 , 1.351664987117452e-05 , -1.391917758734145e-05 , -3.264922954751679e-06 , 1.128720431864021e-05 , -1.552278484090616e-07 , -3.464691582178041e-06 , 2.259380952893320e-04 ],
[ -8.935288190652161e-04 , -1.065238741065750e-03 , 7.937359374391768e-05 , 5.752547360557256e-04 , -1.029965243518811e-04 , -4.567637545650542e-04 , 2.963398987389943e-04 , 5.155942974268113e-04 , -3.523961692265653e-04 , -8.684832913376213e-04 , 1.596306400901987e-04 , 9.274502975544414e-04 , 4.771446682359326e-05 , -5.007069662988802e-04 , -7.942270207742560e-05 , 1.322450571128168e-04 , 2.441262913064850e-05 , -2.756468125262591e-05 , 6.943645566973078e-06 , 1.041750480940249e-05 , -1.187862037244014e-05 , 1.702364109770825e-06 , 7.400825614557900e-06 , -6.767501859886680e-06 , -7.456805310854244e-07 , 5.695968329623519e-06 , -2.204234030240727e-06 , -2.458146094280224e-06 , 1.077364537604088e-06 , 4.312391512705764e-07 , 5.884326361165565e-04 ],
[ 5.660396510942980e-04 , 4.135880276066762e-04 , -1.452498186752349e-04 , -8.857003353897563e-05 , 1.699409468310743e-04 , -2.016876766011903e-05 , -1.506945170950608e-04 , 1.219637950738874e-04 , 3.020345263188087e-04 , -1.835212360942504e-04 , -6.262219982793482e-04 , 4.771446682359360e-05 , 7.353511125371758e-04 , 8.054171359132859e-05 , -4.354044149858314e-04 , -6.575758219487838e-05 , 1.322779340443631e-04 , 4.893233447412187e-06 , -2.860359932846397e-05 , 1.985815168274937e-05 , 1.407122212777636e-06 , -1.355631776270834e-05 , 9.804336837952511e-06 , 1.705077595669618e-06 , -8.448838581047592e-06 , 5.271239541237292e-06 , 3.753161433794400e-06 , -5.679341230392703e-06 , -7.297839478992945e-07 , 1.996414791054073e-06 , -5.689656491774725e-04 ],
[ 1.092640880493588e-04 , 2.536775346692864e-04 , 1.384058476804722e-05 , -2.395794347876363e-04 , -5.189717276079290e-05 , 2.508509815496312e-04 , -5.149346314745000e-05 , -1.881362361335514e-04 , 1.358462914820523e-04 , 2.681276727854418e-04 , -1.832949555936157e-04 , -5.007069662988805e-04 , 8.054171359132875e-05 , 5.670985721529502e-04 , 4.105350281394086e-05 , -3.243779076268346e-04 , -5.693079967475888e-05 , 9.476238507687856e-05 , 1.671992883730651e-05 , -2.625490072653236e-05 , 1.094711235171939e-05 , 8.092095182176009e-06 , -1.368592923368957e-05 , 4.725521343618847e-06 , 6.462723202671019e-06 , -8.176454311340966e-06 , -1.037965911726869e-06 , 5.963104944027835e-06 , -2.287646204875769e-07 , -1.804397982061943e-06 , 6.675499678278738e-05 ],
[ -3.909536572033257e-04 , -2.530397572913827e-04 , 1.530299855807417e-04 , 1.413569388536693e-04 , -6.126197146900289e-05 , -1.147671414054899e-04 , 9.215110292974495e-05 , 5.406677887798494e-05 , -1.115872186939490e-04 , 3.285354767345385e-05 , 2.062011811517907e-04 , -7.942270207742549e-05 , -4.354044149858315e-04 , 4.105350281394089e-05 , 5.023053531078210e-04 , 1.395753202566780e-05 , -2.794248341066854e-04 , -2.462616877967573e-05 , 7.014950575686348e-05 , 7.678983396148418e-06 , -1.200073137869544e-05 , 4.735853628377502e-06 , 3.823008200476699e-06 , -5.632608045337210e-06 , 1.401726052082347e-06 , 2.631914429094741e-06 , -1.879900165857796e-06 , -6.802392260490853e-07 , 6.412891565621652e-07 , 5.793723170821993e-08 , 2.979440856739876e-04 ],
[ 2.271387694572524e-04 , -5.690638693903491e-05 , -1.908370243276230e-04 , 5.605747482890452e-05 , 8.454620554639201e-05 , -7.415040892571150e-05 , -3.132804577761707e-05 , 6.730117550948228e-05 , 4.093795217440853e-06 , -7.506050741939299e-05 , 5.639579837834042e-05 , 1.322450571128173e-04 , -6.575758219487839e-05 , -3.243779076268348e-04 , 1.395753202566789e-05 , 4.086277915281374e-04 , 2.438181614175771e-05 , -2.406201469878970e-04 , -2.063418073175250e-05 , 6.468348516289834e-05 , 1.651842998945461e-06 , -1.016330205472771e-05 , 7.380837404491689e-06 , 7.876901704903023e-07 , -5.693055610174383e-06 , 3.898194171094561e-06 , 1.890193310260514e-06 , -3.494268997347222e-06 , -2.097250054628417e-07 , 1.107934512468949e-06 , -2.578053969849174e-04 ],
[ 4.720081507065945e-05 , 1.673844673999971e-04 , 5.614920168523253e-05 , -9.488998643297809e-05 , -2.898403340457248e-05 , 9.932046149486507e-05 , -2.100641270393638e-05 , -6.826604522477717e-05 , 4.590005891560220e-05 , 4.030911032027912e-05 , -7.429445085205212e-05 , 2.441262913064812e-05 , 1.322779340443633e-04 , -5.693079967475883e-05 , -2.794248341066855e-04 , 2.438181614175779e-05 , 3.367003211899217e-04 , 1.421493027932063e-05 , -1.961053122230117e-04 , -1.831760815509797e-05 , 5.249705849097755e-05 , 4.009767661794436e-06 , -9.222615132968448e-06 , 4.447935971545765e-06 , 2.844605015203588e-06 , -4.927439995523699e-06 , 2.779858179450743e-07 , 2.890920446156232e-06 , -3.536840533005166e-07 , -7.989052895188473e-07 , -2.873774500946350e-05 ],
[ -1.728805247745767e-04 , -9.944997873510153e-06 , 1.448595900034050e-04 , 2.026963310536173e-05 , -4.695866195676680e-05 , -1.325820303664937e-05 , 3.506730172274367e-05 , -7.600076704937241e-08 , -2.788695451888763e-05 , 1.270499721233979e-05 , 1.947674856272868e-05 , -2.756468125262590e-05 , 4.893233447412072e-06 , 9.476238507687867e-05 , -2.462616877967574e-05 , -2.406201469878971e-04 , 1.421493027932067e-05 , 2.919803798609199e-04 , 7.292181033176667e-06 , -1.680274842794751e-04 , -1.103641130738799e-05 , 4.275283346882578e-05 , 1.839573029824585e-06 , -5.092906646915116e-06 , 2.996296133918005e-06 , 5.026786485483826e-07 , -1.803524706078249e-06 , 7.612853881615933e-07 , 3.175194859018497e-07 , -2.524196216716103e-07 , 2.671139718648832e-04 ],
[ 1.161095890105204e-04 , -1.718953440122134e-04 , -2.366731351668826e-04 , 3.772638762353110e-05 , 7.705234549814230e-05 , -5.028147494244480e-05 , -2.465494126635465e-05 , 4.545041141091324e-05 , -4.445454868384867e-06 , -3.550009040339265e-05 , 2.925850101283112e-05 , 6.943645566973460e-06 , -2.860359932846412e-05 , 1.671992883730641e-05 , 7.014950575686358e-05 , -2.063418073175254e-05 , -1.961053122230117e-04 , 7.292181033176704e-06 , 2.476672606367232e-04 , 8.122604369362667e-06 , -1.452133704846186e-04 , -9.497391478575562e-06 , 3.809665940899583e-05 , 1.059672833862896e-06 , -5.566702444135148e-06 , 4.241342392780321e-06 , 1.125163314158913e-06 , -3.300826353062116e-06 , 2.381295916739009e-07 , 8.492464195141368e-07 , -2.789569803656198e-04 ],
[ 1.632569772446249e-05 , 1.760399953826087e-04 , 1.303628937641828e-04 , -4.067190865486029e-05 , -3.658438803803874e-05 , 4.435536803388934e-05 , 1.240900749828609e-06 , -3.434406804211623e-05 , 1.774618276396873e-05 , 2.093845130027264e-05 , -3.392404367734537e-05 , 1.041750480940207e-05 , 1.985815168274956e-05 , -2.625490072653231e-05 , 7.678983396148288e-06 , 6.468348516289841e-05 , -1.831760815509795e-05 , -1.680274842794751e-04 , 8.122604369362710e-06 , 2.112966630126243e-04 , 5.363176092207731e-06 , -1.235778898069599e-04 , -7.709953870959738e-06 , 3.098655427549614e-05 , 2.634638058314591e-06 , -4.584365006125596e-06 , 7.784307399132289e-07 , 2.345452381285535e-06 , -6.188482408032955e-07 , -4.998403651495349e-07 , 8.079312086264899e-05 ],
[ -9.505329207477657e-05 , -4.156338135574478e-06 , 8.403491035549607e-05 , 1.321492117523870e-05 , -3.319317982416059e-05 , -2.227553213444590e-06 , 2.076535734347213e-05 , -5.396753498031014e-06 , -1.122137909789006e-05 , 6.936412133339521e-06 , 7.606268115327406e-06 , -1.187862037244012e-05 , 1.407122212777626e-06 , 1.094711235171940e-05 , -1.200073137869545e-05 , 1.651842998945439e-06 , 5.249705849097757e-05 , -1.103641130738799e-05 , -1.452133704846186e-04 , 5.363176092207760e-06 , 1.841513653060571e-04 , 4.008684964031859e-06 , -1.088327175419565e-04 , -4.436272922923257e-06 , 2.663616882515994e-05 , 4.441129647729434e-07 , -1.823900470977472e-06 , 9.131027910925659e-07 , 3.423181895869568e-07 , -3.248030257457939e-07 , 1.565114731653676e-04 ],
[ 5.966357079134110e-05 , -1.832004402942522e-04 , -2.162539474930512e-04 , 1.940880629106866e-05 , 6.573717163799288e-05 , -3.139708798836991e-05 , -2.285062874634257e-05 , 3.160900890445919e-05 , -3.231227423594649e-06 , -2.092061019101990e-05 , 1.774935646371122e-05 , 1.702364109771204e-06 , -1.355631776270847e-05 , 8.092095182175919e-06 , 4.735853628377626e-06 , -1.016330205472776e-05 , 4.009767661794407e-06 , 4.275283346882582e-05 , -9.497391478575592e-06 , -1.235778898069599e-04 , 4.008684964031889e-06 , 1.585945240480566e-04 , 4.814276592252276e-06 , -9.505942249560426e-05 , -5.269885642910686e-06 , 2.508762233822088e-05 , 1.002347324957512e-06 , -3.233685256439425e-06 , 3.615248228908033e-07 , 7.731232588721100e-07 , -2.364008973553363e-04 ],
[ 3.909940118295615e-05 , 2.147464735651595e-04 , 1.579894933576790e-04 , -3.480998018498535e-05 , -3.698152620573602e-05 , 3.307650446358831e-05 , 4.208354769197900e-06 , -2.489184945477703e-05 , 1.210473810098150e-05 , 1.263627438389614e-05 , -2.076809415497966e-05 , 7.400825614557483e-06 , 9.804336837952683e-06 , -1.368592923368950e-05 , 3.823008200476585e-06 , 7.380837404491765e-06 , -9.222615132968445e-06 , 1.839573029824542e-06 , 3.809665940899589e-05 , -7.709953870959746e-06 , -1.088327175419565e-04 , 4.814276592252303e-06 , 1.387884209137800e-04 , 2.113244593212237e-06 , -8.153912579909763e-05 , -4.652337820383065e-06 , 1.937304772679640e-05 , 2.478096542996087e-06 , -8.169606503678209e-07 , -4.287488876009555e-07 , 1.035998031439656e-04 ],
[ -9.094078397502061e-05 , -6.193272093282151e-05 , 1.853443013113500e-05 , 1.778991053653038e-05 , -1.629294629181825e-05 , -6.558671845197636e-06 , 1.425348474305646e-05 , -2.480536094745301e-06 , -7.926468935313898e-06 , 5.132905197400817e-06 , 3.678275105655839e-06 , -6.767501859886567e-06 , 1.705077595669545e-06 , 4.725521343618848e-06 , -5.632608045337194e-06 , 7.876901704902667e-07 , 4.447935971545785e-06 , -5.092906646915108e-06 , 1.059672833862867e-06 , 3.098655427549616e-05 , -4.436272922923254e-06 , -9.505942249560430e-05 , 2.113244593212259e-06 , 1.241068277448159e-04 , 1.324825159079387e-06 , -7.356715084057034e-05 , -1.785631352650215e-06 , 1.695100826863567e-05 , 5.774682432637083e-07 , -3.303613432465353e-07 , 9.651449332646128e-05 ],
[ 3.979403197295345e-05 , -1.319710553324410e-04 , -1.453365923716808e-04 , 1.586887875775279e-05 , 4.241341573520792e-05 , -2.123041867524383e-05 , -1.526811061895372e-05 , 2.230938441981634e-05 , -3.432017428898139e-06 , -1.410173385828241e-05 , 1.351664987117440e-05 , -7.456805310851761e-07 , -8.448838581047687e-06 , 6.462723202670970e-06 , 1.401726052082422e-06 , -5.693055610174417e-06 , 2.844605015203572e-06 , 2.996296133918029e-06 , -5.566702444135167e-06 , 2.634638058314581e-06 , 2.663616882515997e-05 , -5.269885642910686e-06 , -8.153912579909767e-05 , 1.324825159079404e-06 , 1.082133675166925e-04 , 2.990415878922840e-06 , -6.513246311773947e-05 , -2.759724213714544e-06 , 1.484095638923724e-05 , 7.424809301046746e-07 , -1.617594954504215e-04 ],
[ 5.883724662691994e-05 , 1.948452573660281e-04 , 1.270119640983281e-04 , -3.017037178432670e-05 , -2.624727597578309e-05 , 2.397646436678337e-05 , 3.047660598081647e-06 , -1.767486060640050e-05 , 8.827938351713212e-06 , 8.068421998378197e-06 , -1.391917758734134e-05 , 5.695968329623178e-06 , 5.271239541237441e-06 , -8.176454311340913e-06 , 2.631914429094653e-06 , 3.898194171094623e-06 , -4.927439995523706e-06 , 5.026786485483527e-07 , 4.241342392780371e-06 , -4.584365006125614e-06 , 4.441129647729196e-07 , 2.508762233822091e-05 , -4.652337820383076e-06 , -7.356715084057034e-05 , 2.990415878922861e-06 , 9.541694080046339e-05 , 5.311088722428387e-07 , -5.655395254747548e-05 , -7.544356044794082e-07 , 1.269980847624510e-05 , 4.696018935268347e-05 ],
[ -8.868728142024831e-05 , -5.101630490843126e-05 , 1.393651877690296e-05 , 6.647594986721235e-06 , -1.229090821564965e-05 , 9.138618011586676e-07 , 9.299255727538887e-06 , -6.845063675872692e-06 , -2.192391060027468e-06 , 6.590533164499501e-06 , -3.264922954751675e-06 , -2.204234030240666e-06 , 3.753161433794360e-06 , -1.037965911726866e-06 , -1.879900165857787e-06 , 1.890193310260486e-06 , 2.779858179450956e-07 , -1.803524706078243e-06 , 1.125163314158881e-06 , 7.784307399132557e-07 , -1.823900470977467e-06 , 1.002347324957483e-06 , 1.937304772679643e-05 , -1.785631352650217e-06 , -6.513246311773947e-05 , 5.311088722428587e-07 , 7.440208775369848e-05 , 7.311641032314037e-07 , -2.774078047441206e-05 , -4.408828958294675e-07 , 1.075017250578020e-04 ],
[ 1.649195968391140e-05 , -9.176394513867907e-05 , -8.735349638012086e-05 , 1.545376441325374e-05 , 2.348090332681419e-05 , -1.527849014454438e-05 , -8.183025849839297e-06 , 1.581526117380169e-05 , -4.171466247118380e-06 , -9.628875957888362e-06 , 1.128720431864013e-05 , -2.458146094280058e-06 , -5.679341230392763e-06 , 5.963104944027804e-06 , -6.802392260490372e-07 , -3.494268997347246e-06 , 2.890920446156225e-06 , 7.612853881616096e-07 , -3.300826353062134e-06 , 2.345452381285531e-06 , 9.131027910925789e-07 , -3.233685256439427e-06 , 2.478096542996079e-06 , 1.695100826863569e-05 , -2.759724213714545e-06 , -5.655395254747549e-05 , 7.311641032314153e-07 , 6.559666484932615e-05 , 1.240877065411180e-07 , -2.470688255280269e-05 , -9.189338863514660e-05 ],
[ 3.986378541236639e-05 , 4.717722996544147e-05 , 1.562163815155139e-05 , -5.578313586592747e-06 , -2.215657597169136e-07 , 2.261408120955417e-06 , -2.016271133614381e-06 , 2.198506926483088e-07 , 1.331053824099042e-06 , -1.186884523575363e-06 , -1.552278484090472e-07 , 1.077364537604021e-06 , -7.297839478992591e-07 , -2.287646204875707e-07 , 6.412891565621495e-07 , -2.097250054628229e-07 , -3.536840533005254e-07 , 3.175194859018434e-07 , 2.381295916739206e-07 , -6.188482408033085e-07 , 3.423181895869513e-07 , 3.615248228908187e-07 , -8.169606503678325e-07 , 5.774682432637071e-07 , 1.484095638923725e-05 , -7.544356044794156e-07 , -2.774078047441205e-05 , 1.240877065411238e-07 , 1.330905767924987e-05 , 8.884104622005010e-08 , -3.158609279173533e-05 ],
[ -2.080734204109082e-05 , 7.111249931498269e-06 , 1.819382613181743e-05 , -2.498675358118083e-06 , -6.444872622960494e-06 , 3.617283769858598e-06 , 3.118202698102355e-06 , -4.837425950871769e-06 , 8.121122753846729e-07 , 3.379003341109011e-06 , -3.464691582178025e-06 , 4.312391512705559e-07 , 1.996414791054076e-06 , -1.804397982061937e-06 , 5.793723170821257e-08 , 1.107934512468949e-06 , -7.989052895188420e-07 , -2.524196216716127e-07 , 8.492464195141338e-07 , -4.998403651495291e-07 , -3.248030257457955e-07 , 7.731232588721048e-07 , -4.287488876009484e-07 , -3.303613432465375e-07 , 7.424809301046709e-07 , 1.269980847624510e-05 , -4.408828958294696e-07 , -2.470688255280269e-05 , 8.884104622005171e-08 , 1.197542910948322e-05 , 3.878501241188344e-05 ],
[ -5.210020320049051e-02 , -1.890906559693971e-02 , 1.382668594719924e-02 , -7.076421937386331e-03 , 7.322667111787697e-04 , 2.325697711870943e-03 , -1.983005807705755e-03 , -2.819410239254837e-05 , 1.468573793837301e-03 , -1.318935000558654e-03 , 2.259380952893342e-04 , 5.884326361165944e-04 , -5.689656491774901e-04 , 6.675499678278620e-05 , 2.979440856739906e-04 , -2.578053969849344e-04 , -2.873774500945195e-05 , 2.671139718648887e-04 , -2.789569803656384e-04 , 8.079312086266559e-05 , 1.565114731653709e-04 , -2.364008973553556e-04 , 1.035998031439817e-04 , 9.651449332646111e-05 , -1.617594954504337e-04 , 4.696018935269557e-05 , 1.075017250578020e-04 , -9.189338863515410e-05 , -3.158609279173351e-05 , 3.878501241188487e-05 , 2.121632678397157e-01 ]])
| gpl-2.0 |
Asquera/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/msi.py | 61 | 20261 | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        s = '_' + s
    id = ''.join(c for c in s if c in charset)
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
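# Illustrative behavior (assuming the ''.join fix above): colliding inputs
# receive a numeric suffix so every generated Id stays unique per id_set.
#   ids = {}
#   convert_to_id('my-file.txt', ids)   # -> 'myfile.txt'
#   convert_to_id('my file.txt', ids)   # -> 'myfile.txt1' (collision)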
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
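# Examples:
#   is_dos_short_file_name('README.TXT')    # -> True  (upper case, 6 chars + .3 ext)
#   is_dos_short_file_name('readme.txt')    # -> False (not upper case)
#   is_dos_short_file_name('LONGFILENAME')  # -> False (stem longer than 8 chars)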
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
These are no complete 8.3 dos short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
    fname = ''.join(c for c in fname if c not in forbidden)
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
    duplicate, num = True, 1
    while duplicate:
        shortname = "%s%s" % (fname[:8 - len(str(num))].upper(), str(num))
        if len(ext) >= 2:
            shortname = "%s%s" % (shortname, ext[:4].upper())
        duplicate, num = shortname in filename_set, num + 1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
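# Illustration only (not part of the original module): long names are squeezed
# into 8.3 form with a numeric disambiguator instead of the usual '~', exactly
# as the docstring above warns.
def _gen_dos_short_file_name_demo():
    """Returns ['VERYLON1.TXT', 'VERYLON2.TXT'] for two colliding long names."""
    seen = []
    first = gen_dos_short_file_name('verylongfilename.txt', seen)
    second = gen_dos_short_file_name('verylongfilenames.txt', seen)
    return [first, second]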
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
To handle this requirement, the uuid is generated with an md5 hashing the
whole subtree of a xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
    # Find all XML nodes matching the key, retrieve their attribute, hash
    # their subtree, convert the hash to a string and add it as an attribute
    # to the XML node.
    for (key, value) in needs_id.items():
        node_list = root.getElementsByTagName(key)
        attribute = value
        for node in node_list:
            hash = md5(node.toxml().encode('utf-8')).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
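# Illustration only: a Component's Guid is derived from the md5 of its
# serialized subtree, so unchanged content keeps a stable Guid across builds.
def _generate_guids_demo():
    doc = Document()
    root = doc.createElement('Wix')
    component = doc.createElement('Component')
    root.appendChild(component)
    generate_guids(root)
    return component.getAttribute('Guid')  # same value on every run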
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
    except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
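# Illustration only: the innermost directory returned here carries the
# MY_DEFAULT_FOLDER Id that the feature section later targets via its
# ConfigurableDirectory attribute.
def _directory_layout_demo():
    doc = Document()
    root = doc.createElement('Wix')
    root.appendChild(doc.createElement('Product'))
    innermost = create_default_directory_layout(root, 'Product', '1.2',
                                                'Company', [])
    return innermost.getAttribute('Id')  # 'MY_DEFAULT_FOLDER'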
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completly. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level with the name and VERSION of the software will be created.
An PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
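# Illustration only: build_license_file only needs something with a get_path()
# method, so a throwaway stub suffices to inspect the generated RTF.
def _license_file_demo(directory_path):
    class _Dir(object):
        def __init__(self, path):
            self._path = path
        def get_path(self):
            return self._path
    spec = {'LICENSE': 'MIT', 'X_MSI_LICENSE_TEXT': 'Permission is hereby granted...'}
    build_license_file(_Dir(directory_path), spec)  # writes License.rtf there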
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
    # now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
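# Illustration only: NAME, VERSION, VENDOR and SUMMARY are the spec keys the
# header section insists on; X_MSI_LANGUAGE falls back to 1033 (English).
def _header_section_demo():
    doc = Document()
    root = doc.createElement('Wix')
    spec = {'NAME': 'Product', 'VERSION': '1.2',
            'VENDOR': 'Company', 'SUMMARY': 'A demo package'}
    build_wxsfile_header_section(root, spec)
    return root.toxml()  # '<Wix><Product ...><Package .../>...</Product></Wix>'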
# This builder is the entry point for the .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
    # Gather the keywords for the specfile compiler. These are the arguments
    # given to the package function plus all optional ones stored in kw, minus
    # the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
    specfile = wxs_builder(env, target, source, **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
johnkit/vtk-dev | Filters/Hybrid/Testing/Python/TestGridWarp3D.py | 20 | 2154 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetDataOrigin(-100.8,-100.8,-69)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
reader.Update()
p1 = vtk.vtkPoints()
p2 = vtk.vtkPoints()
p1.InsertNextPoint(0,0,0)
p2.InsertNextPoint(-60,10,20)
p1.InsertNextPoint(-100,-100,-50)
p2.InsertNextPoint(-100,-100,-50)
p1.InsertNextPoint(-100,-100,50)
p2.InsertNextPoint(-100,-100,50)
p1.InsertNextPoint(-100,100,-50)
p2.InsertNextPoint(-100,100,-50)
p1.InsertNextPoint(-100,100,50)
p2.InsertNextPoint(-100,100,50)
p1.InsertNextPoint(100,-100,-50)
p2.InsertNextPoint(100,-100,-50)
p1.InsertNextPoint(100,-100,50)
p2.InsertNextPoint(100,-100,50)
p1.InsertNextPoint(100,100,-50)
p2.InsertNextPoint(100,100,-50)
p1.InsertNextPoint(100,100,50)
p2.InsertNextPoint(100,100,50)
transform = vtk.vtkThinPlateSplineTransform()
transform.SetSourceLandmarks(p1)
transform.SetTargetLandmarks(p2)
transform.SetBasisToR()
gridThinPlate = vtk.vtkTransformToGrid()
gridThinPlate.SetInput(transform)
gridThinPlate.SetGridExtent(0,64,0,64,0,50)
gridThinPlate.SetGridSpacing(3.2,3.2,3.0)
gridThinPlate.SetGridOrigin(-102.4,-102.4,-75)
gridThinPlate.SetGridScalarTypeToUnsignedChar()
gridThinPlate.Update()
gridTransform = vtk.vtkGridTransform()
gridTransform.SetDisplacementGridData(gridThinPlate.GetOutput())
gridTransform.SetDisplacementShift(gridThinPlate.GetDisplacementShift())
gridTransform.SetDisplacementScale(gridThinPlate.GetDisplacementScale())
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetResliceTransform(gridTransform)
reslice.SetInterpolationModeToLinear()
reslice.SetOutputSpacing(1,1,1)
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(reslice.GetOutputPort())
viewer.SetZSlice(70)
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
| bsd-3-clause |
cossacklabs/acra | wrappers/python/acrawriter/django/__init__.py | 1 | 3334 | # Copyright 2016, Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from django.core import validators
from django.db import models
from django import forms
from django.utils import six
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import acrawriter
__all__ = ('CharField', 'EmailField', 'TextField')
class CharField(models.CharField):
def __init__(self, public_key=None, encoding='utf-8',
encoding_errors='ignore', *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self._encoding = encoding
self._encoding_errors = encoding_errors
if not (public_key or settings.ACRA_SERVER_PUBLIC_KEY):
raise ValueError("Set public key arg or settings.ACRA_SERVER_PUBLIC_KEY")
self._public_key = public_key or settings.ACRA_SERVER_PUBLIC_KEY
def from_db_value(self, value, *args, **kwargs):
if isinstance(value, memoryview):
value = value.tobytes()
if isinstance(value, six.binary_type):
return value.decode(self._encoding, errors=self._encoding_errors)
else:
return value
def get_db_prep_value(self, value, connection, prepared=False):
value = super(CharField, self).get_db_prep_value(
value, connection, prepared)
if value == '':
return b''
elif value is None:
return None
else:
return acrawriter.create_acrastruct(value.encode(self._encoding), self._public_key)
def get_internal_type(self):
return 'BinaryField'
def to_python(self, value):
value = super(CharField, self).to_python(value)
if isinstance(value, six.binary_type):
return value.decode(self._encoding, errors=self._encoding_errors)
else:
return value
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
class TextField(CharField):
description = _("Text")
def __init__(self, *args, **kwargs):
super(TextField, self).__init__(*args, **kwargs)
self.validators = []
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
def check(self, **kwargs):
return []
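# Minimal usage sketch (illustration only; assumes settings.ACRA_SERVER_PUBLIC_KEY
# is configured and an AcraServer sits between the application and PostgreSQL):
#
#     from django.db import models
#     from acrawriter.django import EmailField, TextField
#
#     class Customer(models.Model):
#         email = EmailField()   # stored encrypted as an AcraStruct (binary)
#         notes = TextField()    # decrypted transparently on the read path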
| apache-2.0 |
ds-hwang/chromium-crosswalk | tools/android/loading/resource_sack_display.py | 3 | 4182 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for displaying a ResourceSack.
When run standalone, takes traces on the command line and produces a dot file to
stdout.
"""
def ToDot(sack, output, prune=-1, long_edge_msec=2000):
"""Output as a dot file.
Args:
sack: (ResourceSack) the sack to convert to dot.
output: a file-like output stream.
prune: if positive, prune & coalesce nodes under the specified threshold
of repeated views, as fraction node views / total graphs. All pruned
nodes are represented by a single node, and an edge is connected only if
the view count is greater than 1.
long_edge_msec: if positive, the definition of a long edge. Long edges are
distinguished in graph.
"""
output.write("""digraph dependencies {
rankdir = LR;
""")
pruned = set()
num_graphs = len(sack.graph_info)
for bag in sack.bags:
if prune > 0 and float(len(bag.graphs)) / num_graphs < prune:
pruned.add(bag)
continue
output.write('%d [label="%s (%d)\n(%d, %d)\n(%.2f, %.2f)" shape=%s; '
'style=filled; fillcolor=%s];\n' % (
bag.Index(), bag.label, len(bag.graphs),
min(bag.total_costs), max(bag.total_costs),
min(bag.relative_costs), max(bag.relative_costs),
_CriticalToShape(bag),
_AmountToNodeColor(len(bag.graphs), num_graphs)))
if pruned:
pruned_index = num_graphs
output.write('%d [label="Pruned at %.0f%%\n(%d)"; '
'shape=polygon; style=dotted];\n' %
(pruned_index, 100 * prune, len(pruned)))
for bag in sack.bags:
if bag in pruned:
for succ in bag.Successors():
if succ not in pruned:
output.write('%d -> %d [style=dashed];\n' % (
pruned_index, succ.Index()))
for succ in bag.Successors():
if succ in pruned:
if len(bag.successor_sources[succ]) > 1:
output.write('%d -> %d [label="%d"; style=dashed];\n' % (
bag.Index(), pruned_index, len(bag.successor_sources[succ])))
else:
num_succ = len(bag.successor_sources[succ])
num_long = 0
for graph, source, target in bag.successor_sources[succ]:
if graph.EdgeCost(source, target) > long_edge_msec:
num_long += 1
if num_long > 0:
long_frac = float(num_long) / num_succ
long_edge_style = '; penwidth=%f' % (2 + 6.0 * long_frac)
if long_frac < 0.75:
long_edge_style += '; style=dashed'
else:
long_edge_style = ''
min_edge = min(bag.successor_edge_costs[succ])
max_edge = max(bag.successor_edge_costs[succ])
output.write('%d -> %d [label="%d\n(%f,%f)"; color=%s %s];\n' % (
bag.Index(), succ.Index(), num_succ, min_edge, max_edge,
_AmountToEdgeColor(num_succ, len(bag.graphs)),
long_edge_style))
output.write('}')
def _CriticalToShape(bag):
frac = float(bag.num_critical) / bag.num_nodes
if frac < 0.4:
return 'oval'
elif frac < 0.7:
return 'polygon'
elif frac < 0.9:
return 'trapezium'
return 'box'
def _AmountToNodeColor(numer, denom):
if denom <= 0:
return 'grey72'
ratio = 1.0 * numer / denom
if ratio < .3:
return 'white'
elif ratio < .6:
return 'yellow'
elif ratio < .8:
return 'orange'
return 'green'
def _AmountToEdgeColor(numer, denom):
color = _AmountToNodeColor(numer, denom)
if color == 'white' or color == 'grey72':
return 'black'
return color
def _Main():
import json
import logging
import sys
import loading_model
import loading_trace
import resource_sack
sack = resource_sack.GraphSack()
for fname in sys.argv[1:]:
trace = loading_trace.LoadingTrace.FromJsonDict(
json.load(open(fname)))
logging.info('Making graph from %s', fname)
model = loading_model.ResourceGraph(trace, content_lens=None)
sack.ConsumeGraph(model)
logging.info('Finished %s', fname)
ToDot(sack, sys.stdout, prune=.1)
if __name__ == '__main__':
_Main()
| bsd-3-clause |
overra/node-gyp | gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
| mit |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/vm/manual/custom.py | 1 | 1298 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def sshkey_create(client,
resource_group_name,
ssh_public_key_name,
location,
tags=None,
public_key=None):
parameters = {
'location': location,
'tags': tags,
'public_key': public_key
}
client.create(resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
parameters=parameters)
if public_key is None: # Generate one if public key is None
client.generate_key_pair(resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name)
return client.get(resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name)
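# A minimal sketch (illustration only) of the generate-if-missing flow above,
# driven by a stub instead of the real compute SDK client:
def _sshkey_create_demo():
    class _StubClient(object):
        def create(self, **kwargs):
            self.created = kwargs
        def generate_key_pair(self, **kwargs):
            self.generated = kwargs
        def get(self, **kwargs):
            return {'name': kwargs['ssh_public_key_name']}
    client = _StubClient()
    # public_key omitted, so the service is asked to generate a key pair
    return sshkey_create(client, 'my-rg', 'my-key', 'westus2')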
| mit |
neiudemo1/django | docs/conf.py | 54 | 11938 | # -*- coding: utf-8 -*-
#
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import sys
from os.path import abspath, dirname, join
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.10'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep386ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep386ver:
return pep386ver + '.dev'
return pep386ver
release = django_release()
# The "development version" of Django
django_next_version = '1.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://sphinx-doc.org/', None),
'six': ('http://pythonhosted.org/six/', None),
'formtools': ('http://django-formtools.readthedocs.org/en/latest/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
html_translator_class = "djangodocs.DjangoHTMLTranslator"
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': ('\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}')
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
), ]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- ticket options ------------------------------------------------------------
ticket_url = 'https://code.djangoproject.com/ticket/%s'
| bsd-3-clause |
oskopek/devassistant | test/test_actions.py | 1 | 9352 | import os
import subprocess
import pytest
from flexmock import flexmock
from devassistant import actions, exceptions
from devassistant.dapi import dapicli
from test.logger import LoggingHandler
class TestActions(object):
def setup_class(self):
self.ha = actions.HelpAction
def test_get_help_contains_task_keywords(self):
gh = self.ha().get_help()
assert 'crt' in gh
assert 'twk' in gh
assert 'prep' in gh
assert 'extra' in gh
def test_get_help_contains_action_name(self):
a = actions.Action()
a.name = 'foobar_action_name'
a.description = 'foobar_action_description'
actions.register_action(a)
assert 'foobar_action_name' in self.ha().get_help()
assert 'foobar_action_description' in self.ha().get_help()
def test_format_text_returns_original_text_on_bogus_formatting(self):
assert self.ha().format_text('aaa', 'foo', 'bar') == 'aaa'
assert self.ha().format_text('', 'foo', 'bar') == ''
def test_format_text_returns_bold(self):
assert self.ha().format_text('aaa', 'bold', 'ascii') == '\033[1maaa\033[0m'
def test_version_action(self, capsys):
va = actions.VersionAction()
from devassistant import __version__ as VERSION
va.run()
assert VERSION in capsys.readouterr()[0]
class TestDocAction(object):
def setup_method(self, method):
self.da = actions.DocAction
self.tlh = LoggingHandler.create_fresh_handler()
def test_no_docs(self):
self.da(dap='f').run()
assert ('INFO', 'DAP f has no documentation.') in self.tlh.msgs
def test_lists_docs(self):
self.da(dap='c').run()
assert self.tlh.msgs == [
('INFO', 'DAP c has these docs:'),
('INFO', 'LICENSE'),
('INFO', 'doc1'),
('INFO', 'something/foo/doc2'),
('INFO', 'Use "da doc c <DOC>" to see a specific document')
]
def test_displays_docs(self):
# we only test displaying without "less" - e.g. simple logging
flexmock(subprocess).should_receive('check_call').\
and_raise(subprocess.CalledProcessError, None, None)
self.da(dap='c', doc='doc1').run()
assert ('INFO', 'Bar!\n') in self.tlh.msgs
class TestPkgSearchAction(object):
@pytest.mark.parametrize('exc', [exceptions.DapiCommError, exceptions.DapiLocalError])
def test_raising_exceptions(self, exc):
flexmock(dapicli).should_receive('print_search').and_raise(exc)
with pytest.raises(exceptions.ExecutionException):
actions.PkgSearchAction(query='foo', noassistants=False, unstable=False,
deactivated=False, minrank=0, mincount=0,
allplatforms=False).run()
class TestPkgInstallAction(object):
def setup_class(self):
self.pkg = 'foo'
self.exc_string = 'bar'
@pytest.mark.parametrize(('isfile', 'method'), [
(True, 'install_dap_from_path'),
(False, 'install_dap')
])
def test_pkg_install(self, isfile, method):
flexmock(os.path).should_receive('isfile').with_args(self.pkg)\
.and_return(isfile).at_least().once()
flexmock(dapicli).should_receive(method)\
.and_return([self.pkg]).at_least().once()
# Install from path, everything goes well
actions.PkgInstallAction(package=[self.pkg], force=False,
reinstall=False, nodeps=False).run()
def test_pkg_install_fails(self):
flexmock(os.path).should_receive('isfile').with_args(self.pkg)\
.and_return(True).at_least().once()
flexmock(dapicli).should_receive('install_dap_from_path')\
.and_raise(exceptions.DapiLocalError(self.exc_string)).at_least().once()
with pytest.raises(exceptions.ExecutionException) as excinfo:
actions.PkgInstallAction(package=[self.pkg], force=False,
reinstall=False, nodeps=False).run()
assert self.exc_string in str(excinfo.value)
class TestPkgUpdateAction(object):
def test_pkg_update_all(self):
        '''Run update without args to update all, but everything is up to date'''
flexmock(dapicli).should_receive('get_installed_daps')\
.and_return(['foo']).at_least().once()
flexmock(dapicli).should_receive('install_dap')\
.and_return([]).at_least().once()
# Update all, everything is up to date
actions.PkgUpdateAction(force=False, allpaths=False).run()
def test_pkg_update_no_dapi(self):
'''Run update of package that is not on Dapi'''
flexmock(dapicli).should_receive('metadap')\
.and_return(None).at_least().once()
with pytest.raises(exceptions.ExecutionException) as excinfo:
actions.PkgUpdateAction(package=['foo'], force=False, allpaths=False).run()
assert 'foo not found' in str(excinfo.value)
def test_pkg_update_no_installed(self):
'''Run update of package that is not installed'''
flexmock(dapicli).should_receive('_get_metadap_dap')\
.and_return(({}, {'version': '0.0.1'})).at_least().once()
flexmock(dapicli).should_receive('get_installed_version_of')\
.and_return(None).at_least().once()
with pytest.raises(exceptions.ExecutionException) as excinfo:
actions.PkgUpdateAction(package=['foo'], force=False, allpaths=False).run()
assert 'Cannot update not yet installed DAP' in str(excinfo.value)
@pytest.mark.parametrize('action', [
actions.PkgUninstallAction,
actions.PkgRemoveAction
])
class TestPkgUninstallAction(object):
def test_pkg_uninstall_dependent(self, action):
'''Uninstall two packages, but the first depend on the latter'''
flexmock(dapicli).should_receive('uninstall_dap')\
.and_return(['first', 'second']).at_least().once()
action(package=['first', 'second'], force=True, allpaths=False).run()
def test_pkg_uninstall_not_installed(self, action):
'''Uninstall package that is not installed'''
flexmock(dapicli).should_receive('get_installed_daps')\
.and_return(['bar']).at_least().once()
with pytest.raises(exceptions.ExecutionException) as excinfo:
action(package=['foo'], force=True, allpaths=False).run()
assert 'Cannot uninstall DAP foo' in str(excinfo.value)
class TestAutoCompleteAction(object):
def setup_class(self):
self.aca = actions.AutoCompleteAction
self.fake_desc = [flexmock(name=n,
get_subassistants=lambda: [],
args=[]) for n in ['foo', 'bar', 'baz']]
self.fake_arg = flexmock(flags=('--qux',), kwargs=dict())
self.fake_crt = flexmock(name='crt',
get_subassistants=lambda: self.fake_desc,
args=[self.fake_arg])
@pytest.mark.parametrize('path', ['', '--debug', '__debug'])
def test_root_path(self, path, capsys):
expected = set(['--debug', '--help', 'create', 'doc', 'extra', 'help',
'pkg', 'prepare', 'tweak', 'version'])
self.aca(path=path).run()
stdout, _ = capsys.readouterr()
assert stdout
assert expected.issubset(set(stdout.split()))
@pytest.mark.parametrize('obj', [
flexmock(get_subassistants=lambda: []),
flexmock(get_subactions=lambda: [])
])
def test_get_descendants(self, obj):
self.aca._get_descendants(obj)
@pytest.mark.parametrize('obj', [
flexmock(get_subassistants=''),
flexmock()
])
def test_get_descendants_fails(self, obj):
with pytest.raises(TypeError):
self.aca._get_descendants(obj)
@pytest.mark.parametrize('path', ['crt', 'crt --qux'])
def test_assistants(self, path, capsys):
aca = self.aca(path=path)
flexmock(aca).should_receive('_assistants').and_return([self.fake_crt])
aca.run()
stdout, _ = capsys.readouterr()
assert not _
assert set([a.name for a in self.fake_desc] + \
[f for f in self.fake_arg.flags]).issubset(set(stdout.split()))
@pytest.mark.parametrize(('long_name', 'short_name'), [
('create', 'crt'),
('tweak', 'twk'),
('twk', 'mod'),
('prepare', 'prep'),
('extra', 'task'),
])
def test_aliases(self, long_name, short_name, capsys):
self.aca(path=long_name).run()
long_stdout, _ = capsys.readouterr()
assert long_stdout
self.aca(path=short_name).run()
short_stdout, _ = capsys.readouterr()
assert short_stdout
assert long_stdout == short_stdout
def test_filenames(self, capsys):
self.aca(path='pkg info').run()
stdout, _ = capsys.readouterr()
assert '_FILENAMES' in stdout.split()
def test_bad_input(self, capsys):
self.aca(path='foo bar baz').run()
stdout, _ = capsys.readouterr()
assert not stdout.split()
| gpl-2.0 |
jjingrong/PONUS-1.2 | venv/build/django/django/db/models/options.py | 104 | 24269 | from __future__ import unicode_literals
import re
from bisect import bisect
import warnings
from django.conf import settings
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils import six
from django.utils.functional import cached_property
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from django.utils.translation import activate, deactivate_all, get_language, string_concat
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'select_on_save')
@python_2_unicode_compatible
class Options(object):
def __init__(self, meta, app_label=None):
self.local_fields, self.local_many_to_many = [], []
self.virtual_fields = []
self.model_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.permissions = []
self.object_name, self.app_label = None, app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.meta = meta
self.pk = None
self.has_auto_field, self.auto_field = False, None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = SortedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes).
self.abstract_managers = []
self.concrete_managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.model = cls
        self.installed = re.sub(r'\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
# calling code can uniformly expect that.
ut = meta_attrs.pop('unique_together', self.unique_together)
if ut and not isinstance(ut[0], (tuple, list)):
ut = (ut,)
self.unique_together = ut
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
@property
def module_name(self):
"""
This property has been deprecated in favor of `model_name`. refs #19689
"""
warnings.warn(
"Options.module_name has been deprecated in favor of model_name",
PendingDeprecationWarning, stacklevel=2)
return self.model_name
def _prepare(self, model):
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
# The fields, concrete_fields and local_concrete_fields are
# implemented as cached properties for performance reasons.
# The attrs will not exists if the cached property isn't
# accessed yet, hence the try-excepts.
try:
del self.fields
except AttributeError:
pass
try:
del self.concrete_fields
except AttributeError:
pass
try:
del self.local_concrete_fields
except AttributeError:
pass
if hasattr(self, '_name_map'):
del self._name_map
def add_virtual_field(self, field):
self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def pk_index(self):
"""
Returns the index of the primary key field in the self.concrete_fields
list.
"""
return self.concrete_fields.index(self.pk)
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_text(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
def _swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.model_name)
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
swapped = property(_swapped)
@cached_property
def fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
@cached_property
def concrete_fields(self):
return [f for f in self.fields if f.column is not None]
@cached_property
def local_concrete_fields(self):
return [f for f in self.local_fields if f.column is not None]
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def get_concrete_fields_with_model(self):
return [(field, model) for field, model in self.get_fields_with_model() if
field.column is not None]
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(self._m2m_cache)
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(six.iteritems(self._m2m_cache))
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = (self.fields + self.many_to_many) if many_to_many else self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
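    # Typical caller pattern for the 4-tuple above (illustration only, not
    # part of this module):
    #
    #     field, model, direct, m2m = opts.get_field_by_name('author')
    #     if direct and not m2m:
    #         column = field.column                 # a concrete local Field
    #     elif not direct:
    #         accessor = field.get_accessor_name()  # a RelatedObject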
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = sorted(cache.keys())
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
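    # Illustrative sketch (not part of the original module): these helpers are
    # normally reached through a model's _meta; the model name below is
    # hypothetical.
    #
    #   opts = Author._meta
    #   opts.get_all_field_names()          # e.g. ['book', 'id', 'name']
    #   field, model, direct, m2m = opts.get_field_by_name('name')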
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = {}
# We intentionally handle related m2m objects first so that symmetrical
# m2m accessor names can be overridden, if necessary.
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_fields_with_model():
cache[f.name] = (f, model, True, False)
for f in self.virtual_fields:
if hasattr(f, 'related'):
cache[f.name] = (f.related, None if f.model == self.model else f.model, True, False)
if app_cache_ready():
self._name_map = cache
return cache
def get_add_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_add_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'add_%s' % self.model_name
def get_change_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_change_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'change_%s' % self.model_name
def get_delete_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_delete_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'delete_%s' % self.model_name
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [k for k, v in self.get_all_related_objects_with_model(
local_only=local_only, include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq)]
def get_all_related_objects_with_model(self, local_only=False,
include_hidden=False,
include_proxy_eq=False):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
predicates = []
if local_only:
predicates.append(lambda k, v: not v)
if not include_hidden:
predicates.append(lambda k, v: not k.field.rel.is_hidden())
cache = (self._related_objects_proxy_cache if include_proxy_eq
else self._related_objects_cache)
return [t for t in cache.items() if all(p(*t) for p in predicates)]
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
# Collect also objects which are in relation to some proxy child/parent of self.
proxy_cache = cache.copy()
for klass in get_models(include_auto_created=True, only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation:
if self == f.rel.to._meta:
cache[f.related] = None
proxy_cache[f.related] = None
elif self.concrete_model == f.rel.to._meta.concrete_model:
proxy_cache[f.related] = None
self._related_objects_cache = cache
self._related_objects_proxy_cache = proxy_cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return list(cache)
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return list(six.iteritems(cache))
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models(only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_many_to_many:
if (f.rel
and not isinstance(f.rel.to, six.string_types)
and self == f.rel.to._meta):
cache[f.related] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from
        closest to most distant ancestor). This has to handle the case where
        'model' is a grandparent or even more distant relation.
"""
if not self.parents:
return None
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return None
def get_parent_list(self):
"""
        Returns a set of all the ancestors of this model. Useful for
        determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In the case of a proxied model, the first link
                # of the chain to the ancestor is that parent's
                # link.
return self.parents[parent] or parent_link
| mit |
jwalgran/otm-core | opentreemap/treemap/lib/user.py | 4 | 8019 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.db.models import Q
from treemap.audit import Audit, Authorizable, get_auditable_class
from treemap.models import Instance, MapFeature, InstanceUser, User
from treemap.util import get_filterable_audit_models
from treemap.lib.object_caches import udf_defs
from treemap.udf import UDFModel
def _instance_ids_edited_by(user):
return Audit.objects.filter(user=user)\
.values_list('instance_id', flat=True)\
.exclude(instance_id=None)\
.distinct()
PAGE_DEFAULT = 20
ALLOWED_MODELS = get_filterable_audit_models()
def get_audits(logged_in_user, instance, query_vars, user=None,
models=ALLOWED_MODELS, model_id=None, start_id=None,
prev_start_ids=[], page_size=PAGE_DEFAULT, exclude_pending=True,
should_count=False):
if instance:
if instance.is_accessible_by(logged_in_user):
instances = Instance.objects.filter(pk=instance.pk)
else:
instances = Instance.objects.none()
# If we didn't specify an instance we only want to
# show audits where the user has permission
else:
instances = Instance.objects\
.filter(user_accessible_instance_filter(logged_in_user))
if user:
instances = instances.filter(pk__in=_instance_ids_edited_by(user))
instances = instances.distinct()
if not instances.exists():
# Force no results
return {'audits': Audit.objects.none(),
'total_count': 0,
'next_page': None,
'prev_page': None}
map_feature_models = set(MapFeature.subclass_dict().keys())
model_filter = Q()
# We only want to show the TreePhoto's image, not other fields
# and we want to do it automatically if 'Tree' was specified as
# a model. The same goes for MapFeature(s) <-> MapFeaturePhoto
# There is no need to check permissions, because photos are always visible
if 'Tree' in models:
model_filter = model_filter | Q(model='TreePhoto', field='image')
if map_feature_models.intersection(models):
model_filter = model_filter | Q(model='MapFeaturePhoto', field='image')
for inst in instances:
eligible_models = ({'Tree', 'TreePhoto', 'MapFeaturePhoto'} |
set(inst.map_feature_types)) & set(models)
if logged_in_user == user:
eligible_udfs = {'udf:%s' % udf.id for udf in udf_defs(inst)
if udf.model_type in eligible_models
and udf.iscollection}
# The logged-in user can see all their own edits
model_filter = model_filter | Q(
instance=inst, model__in=(eligible_models | eligible_udfs))
else:
# Filter other users' edits by their visibility to the
# logged-in user
for model in eligible_models:
ModelClass = get_auditable_class(model)
fake_model = ModelClass(instance=inst)
if issubclass(ModelClass, Authorizable):
visible_fields = fake_model.visible_fields(logged_in_user)
model_filter = model_filter |\
Q(model=model, field__in=visible_fields, instance=inst)
else:
model_filter = model_filter | Q(model=model, instance=inst)
if issubclass(ModelClass, UDFModel):
model_collection_udfs_audit_names = (
fake_model.visible_collection_udfs_audit_names(
logged_in_user))
model_filter = model_filter | (
Q(model__in=model_collection_udfs_audit_names))
udf_bookkeeping_fields = Q(
model__startswith='udf:',
field__in=('id', 'model_id', 'field_definition'))
audits = (Audit.objects
.filter(model_filter)
.filter(instance__in=instances)
.select_related('instance')
.exclude(udf_bookkeeping_fields)
.exclude(user=User.system_user())
.order_by('-pk'))
if user:
audits = audits.filter(user=user)
if model_id:
audits = audits.filter(model_id=model_id)
if exclude_pending:
audits = audits.exclude(requires_auth=True, ref__isnull=True)
# Slicing the QuerySet uses a SQL Limit, which has proven to be quite slow.
# By relying on the fact the our list is ordered by primary key from newest
# to oldest, we can rely on the index on the primary key, which is faster.
if start_id is not None:
audits = audits.filter(pk__lte=start_id)
total_count = audits.count() if should_count else 0
audits = audits[:page_size]
# Coerce the queryset into a list so we can get the last audit row on the
# current page
audits = list(audits)
# We are using len(audits) instead of audits.count() because we
# have already realized the queryset at this point
if len(audits) == page_size:
query_vars.setlist('prev', prev_start_ids + [audits[0].pk])
query_vars['start'] = audits[-1].pk - 1
next_page = "?" + query_vars.urlencode()
else:
next_page = None
if prev_start_ids:
if len(prev_start_ids) == 1:
del query_vars['prev']
del query_vars['start']
else:
prev_start_id = prev_start_ids.pop()
query_vars.setlist('prev', prev_start_ids)
query_vars['start'] = prev_start_id
prev_page = "?" + query_vars.urlencode()
else:
prev_page = None
return {'audits': audits,
'total_count': total_count,
'next_page': next_page,
'prev_page': prev_page}
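# Illustrative call (hypothetical view code, not part of this module):
# get_audits_params() below produces keyword arguments that feed directly
# into get_audits(), and the returned dict carries the page of audits plus
# pagination links.
#
#   page = get_audits(request.user, instance, request.GET.copy(),
#                     user=target_user, should_count=True,
#                     **get_audits_params(request))
#   audits, next_page = page['audits'], page['next_page']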
def get_audits_params(request):
PAGE_MAX = 100
r = request.GET
page_size = min(int(r.get('page_size', PAGE_DEFAULT)), PAGE_MAX)
start_id = r.get('start', None)
if start_id is not None:
start_id = int(start_id)
prev_start_ids = [int(pk) for pk in r.getlist('prev')]
models = r.getlist('models', default=ALLOWED_MODELS)
if models:
for model in models:
if model not in ALLOWED_MODELS:
raise Exception("Invalid model: %s" % model)
model_id = r.get('model_id', None)
if model_id is not None and len(models) != 1:
raise Exception("You must specific one and only model "
"when looking up by id")
exclude_pending = r.get('exclude_pending', "false") == "true"
return {'start_id': start_id, 'prev_start_ids': prev_start_ids,
'page_size': page_size, 'models': models, 'model_id': model_id,
'exclude_pending': exclude_pending}
def user_accessible_instance_filter(logged_in_user):
public = Q(is_public=True)
if logged_in_user is not None and not logged_in_user.is_anonymous():
private_with_access = Q(instanceuser__user=logged_in_user)
instance_filter = public | private_with_access
else:
instance_filter = public
return instance_filter
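# Minimal usage sketch (assumes standard Django ORM semantics; not part of
# the original file): the Q object returned above is meant to be applied to
# Instance querysets.
#
#   visible = Instance.objects.filter(
#       user_accessible_instance_filter(request.user)).distinct()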
def get_user_instances(logged_in_user, user, current_instance=None):
# Which instances can the logged-in user see?
instance_filter = (user_accessible_instance_filter(logged_in_user))
user_instance_ids = (InstanceUser.objects
.filter(user_id=user.pk)
.values_list('instance_id', flat=True))
instance_filter = Q(instance_filter, Q(pk__in=user_instance_ids))
# The logged-in user should see the current instance in their own list
if current_instance and logged_in_user == user:
instance_filter = instance_filter | Q(pk=current_instance.id)
return (Instance.objects
.filter(instance_filter)
.distinct()
.order_by('name'))
| gpl-3.0 |
kienpham2000/ansible-modules-core | packaging/rpm_key.py | 60 | 7339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# (c) 2013, Héctor Acosta <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rpm_key
author: Hector Acosta <[email protected]>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
key:
required: true
default: null
aliases: []
description:
- Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
state:
required: false
default: "present"
choices: [present, absent]
description:
      - Whether the key will be imported or removed from the rpm db.
validate_certs:
description:
- If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key: state=present key=/path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key: state=absent key=DEADB33F
'''
import syslog
import os.path
import re
import tempfile
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return re.match(pgp_regex, string, re.DOTALL)
class RpmKey:
def __init__(self, module):
self.syslogging = False
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
self.import_key(keyfile, dryrun=module.check_mode)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid, dryrun=module.check_mode)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
try:
rsp, info = fetch_url(self.module, url)
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
except urllib2.URLError, e:
self.module.fail_json(msg=str(e))
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase"""
ret = keyid.strip().lower()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
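    # Expected behaviour of normalize_keyid (illustrative values):
    #   normalize_keyid('0xDEADB33F')  -> 'deadb33f'
    #   normalize_keyid(' deadb33f\n') -> 'deadb33f'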
def getkeyid(self, keyfile):
gpg = self.module.get_bin_path('gpg', True)
stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith(':signature packet:'):
# We want just the last 8 characters of the keyid
keyid = line.split()[-1].strip()[8:]
return keyid
        self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])
for line in stdout.splitlines():
line = line.strip()
if not line:
continue
match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line)
if not match:
self.module.fail_json(msg="rpm returned unexpected output [%s]" % line)
else:
if keyid == match.group(1):
return True
return False
def import_key(self, keyfile, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, key, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key])
def main():
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
key=dict(required=True, type='str'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
RpmKey(module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
bartQu9/fallenmua | resolvers.py | 1 | 3740 | from urllib.error import URLError, HTTPError
from xml.dom import minidom
from dns import resolver
import urllib.request
import logging
def parse_thunderbird_autoconfig(xml_autoconfig):
mx_servers = []
dom_tree = minidom.parseString(xml_autoconfig)
c_nodes = dom_tree.childNodes
for i in c_nodes[0].getElementsByTagName("outgoingServer"):
try:
curr_hostname = i.getElementsByTagName("hostname")[0].childNodes[0].toxml().lower()
curr_port = int(i.getElementsByTagName("port")[0].childNodes[0].toxml())
curr_sock_type = i.getElementsByTagName("socketType")[0].childNodes[0].toxml().lower()
curr_username_type = i.getElementsByTagName("username")[0].childNodes[0].toxml()
curr_auth_method = i.getElementsByTagName("authentication")[0].childNodes[0].toxml().lower()
except IndexError:
logging.error("Bad autoconfiguration file in ISPDB")
return None
mx_servers.append({'hostname': curr_hostname, 'port': curr_port, 'sock_type': curr_sock_type,
'username_type': curr_username_type, 'auth_method': curr_auth_method})
if mx_servers:
return mx_servers
else:
return None
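# Shape of one parsed entry (illustrative values only; actual contents come
# from the provider's autoconfig XML):
#   {'hostname': 'smtp.example.com', 'port': 587, 'sock_type': 'starttls',
#    'username_type': '%EMAILADDRESS%', 'auth_method': 'password-cleartext'}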
def get_mx_from_ispdb(domain, _timeout=2):
"""
Search for MX servers in Mozilla ISPDB.
:param _timeout: resource connection timeout
:param domain: a str FQDN
:return: List of tuples consists of mx server and listening port
"""
try:
logging.debug("Connecting to the Mozilla ISPDB")
xml_config = urllib.request.urlopen("https://autoconfig.thunderbird.net/autoconfig/v1.1/{0}".
format(domain), timeout=_timeout).read()
logging.debug("Fetched autoconfigure XML file from Mozilla ISPDB")
except HTTPError:
logging.info("No data for domain {0} in the Mozilla ISPDB".format(domain))
return None
except URLError as err:
logging.warning("Unable to connect with the Mozilla ISPDB, reason: {0}".format(err))
return None
mx_servers = parse_thunderbird_autoconfig(xml_config)
hostnames = [mx['hostname'] for mx in mx_servers]
logging.debug("MX servers from Mozilla ISPDB: {0}".format(hostnames))
return mx_servers
def get_mx_from_isp(domain, _timeout=4):
try:
logging.debug("Connecting to the ISP autoconfig")
xml_config = urllib.request.urlopen("http://autoconfig.{0}/mail/config-v1.1.xml".format(domain),
timeout=_timeout).read()
logging.debug("Fetched autoconfigure XML file from autoconfig.{0}/mail/config-v1.1.xml".format(domain))
except (HTTPError, URLError):
logging.info("No data on autoconfig.{0}".format(domain))
return None
mx_servers = parse_thunderbird_autoconfig(xml_config)
hostnames = [mx['hostname'] for mx in mx_servers]
logging.debug("MX servers from autoconfig.{0}: {1}".format(domain, hostnames))
return mx_servers
def get_mx_from_dns(domain):
mx_servers = []
try:
_tmp_mx = []
for mx in resolver.query(domain, "MX"):
_tmp_mx.append(mx.to_text().split(" "))
logging.info("Found {0} MX servers in DNS zone".format(len(_tmp_mx)))
        _tmp_mx.sort(key=lambda mx: int(mx[0])) # sort MX's by numeric priority
except resolver.NXDOMAIN:
logging.error("Cannot resolve domain name ".format(domain))
return None
for mx in _tmp_mx:
for port in (587, 465, 25): # Adding commonly known SMTP ports
mx_servers.append({'hostname': mx[1], 'port': port, 'sock_type': None, 'username_type': None,
'auth_method': None})
return mx_servers
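def get_mx_servers(domain):
    """A possible composition of the resolvers above (a sketch, not part of
    the original file): try the Mozilla ISPDB first, then the ISP's own
    autoconfig, and finally fall back to plain DNS MX records."""
    for resolve in (get_mx_from_ispdb, get_mx_from_isp, get_mx_from_dns):
        mx_servers = resolve(domain)
        if mx_servers:
            return mx_servers
    logging.error("No MX servers found for {0}".format(domain))
    return None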
| gpl-3.0 |
AnimeshSinha1309/WebsiteEdunet | WebsiteEdunet/env/Lib/site-packages/django/shortcuts.py | 135 | 7957 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined, using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined,
using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 1.10, request should become a positional argument.
content = loader.render_to_string(
template_name, context, request=request, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango110Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect.
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
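# Illustrative usage (hypothetical view code, not part of Django's source):
#
#   return redirect('/accounts/login/')               # plain URL
#   return redirect('article-detail', pk=article.pk)  # reverse() by view name
#   return redirect(article, permanent=True)          # get_absolute_url()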
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
| mit |
aperigault/ansible | lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py | 24 | 10519 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqlserver
version_added: "2.5"
short_description: Manage SQL Server instance
description:
- Create, update and delete instance of SQL Server.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
name:
description:
- The name of the server.
required: True
location:
description:
- Resource location.
admin_username:
description:
- Administrator username for the server. Once created it cannot be changed.
admin_password:
description:
- The administrator login password (required for server creation).
version:
description:
- The version of the server. For example C(12.0).
identity:
description:
- The identity type. Set this to C(SystemAssigned) in order to automatically create and assign an Azure Active Directory principal for the resource.
- Possible values include C(SystemAssigned).
state:
description:
- State of the SQL server. Use C(present) to create or update a server and use C(absent) to delete a server.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create (or update) SQL Server
azure_rm_sqlserver:
resource_group: myResourceGroup
name: server_name
location: westus
admin_username: mylogin
admin_password: Testpasswordxyz12!
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645
version:
description:
- The version of the server.
returned: always
type: str
sample: 12.0
state:
description:
- The state of the server.
returned: always
type: str
sample: state
fully_qualified_domain_name:
description:
- The fully qualified domain name of the server.
returned: always
type: str
sample: sqlcrudtest-4645.database.windows.net
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.sql import SqlManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMSqlServer(AzureRMModuleBase):
"""Configuration class for an Azure RM SQL Server resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
admin_username=dict(
type='str'
),
admin_password=dict(
type='str',
no_log=True
),
version=dict(
type='str'
),
identity=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.tags = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMSqlServer, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "location":
self.parameters.update({"location": kwargs[key]})
elif key == "admin_username":
self.parameters.update({"administrator_login": kwargs[key]})
elif key == "admin_password":
self.parameters.update({"administrator_login_password": kwargs[key]})
elif key == "version":
self.parameters.update({"version": kwargs[key]})
elif key == "identity":
self.parameters.update({"identity": {"type": kwargs[key]}})
old_response = None
response = None
results = dict()
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_sqlserver()
if not old_response:
self.log("SQL Server instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("SQL Server instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if SQL Server instance has to be deleted or may be updated")
update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
if update_tags:
self.tags = newtags
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the SQL Server instance")
if self.check_mode:
self.results['changed'] = True
return self.results
self.parameters['tags'] = self.tags
response = self.create_update_sqlserver()
response.pop('administrator_login_password', None)
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("SQL Server instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_sqlserver()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_sqlserver():
time.sleep(20)
else:
self.log("SQL Server instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["version"] = response["version"]
self.results["state"] = response["state"]
self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
return self.results
def create_update_sqlserver(self):
'''
Creates or updates SQL Server with the specified configuration.
:return: deserialized SQL Server instance state dictionary
'''
self.log("Creating / Updating the SQL Server instance {0}".format(self.name))
try:
response = self.sql_client.servers.create_or_update(self.resource_group,
self.name,
self.parameters)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the SQL Server instance.')
self.fail("Error creating the SQL Server instance: {0}".format(str(exc)))
return response.as_dict()
def delete_sqlserver(self):
'''
Deletes specified SQL Server instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the SQL Server instance {0}".format(self.name))
try:
response = self.sql_client.servers.delete(self.resource_group,
self.name)
except CloudError as e:
self.log('Error attempting to delete the SQL Server instance.')
self.fail("Error deleting the SQL Server instance: {0}".format(str(e)))
return True
def get_sqlserver(self):
'''
Gets the properties of the specified SQL Server.
:return: deserialized SQL Server instance state dictionary
'''
self.log("Checking if the SQL Server instance {0} is present".format(self.name))
found = False
try:
response = self.sql_client.servers.get(self.resource_group,
self.name)
found = True
self.log("Response : {0}".format(response))
self.log("SQL Server instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the SQL Server instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMSqlServer()
if __name__ == '__main__':
main()
| gpl-3.0 |
britcey/ansible | lib/ansible/modules/network/dellos9/dellos9_command.py | 46 | 7781 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: dellos9_command
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Run commands on remote devices running Dell OS9
description:
- Sends arbitrary commands to a Dell OS9 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos9_config) to configure Dell OS9 devices.
extends_documentation_fragment: dellos9
options:
commands:
description:
- List of commands to send to the remote dellos9 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
- This module requires to increase the ssh connection rate limit.
Use the following command I(ip ssh connection-rate-limit 60)
to configure the same. This can be done via M(dellos9_config) module
as well.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos9_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS9
dellos9_command:
commands: show version
wait_for: result[0] contains OS9
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos9_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos9_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains OS9
- result[1] contains Loopback
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos9 import run_commands
from ansible.module_utils.dellos9 import dellos9_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
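# Illustrative behaviour of to_lines (hypothetical data): string responses are
# split into lists of lines, other items pass through unchanged.
#
#   list(to_lines(['Dell Force10 OS9\nVersion 9.10']))
#   ->  [['Dell Force10 OS9', 'Version 9.10']]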
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos9_command does not support running config mode '
'commands. Please use dellos9_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos9_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
emonty/vhd-util | tools/python/logging/logging-0.4.9.2/test/log_test11.py | 42 | 2993 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Tests BufferingSMTPHandler, an alternative implementation
of SMTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import string, logging, logging.handlers
MAILHOST = 'beta'
FROM = '[email protected]'
TO = ['arkadi_renko']
SUBJECT = 'Test Logging email from Python logging module (buffering)'
class BufferingSMTPHandler(logging.handlers.BufferingHandler):
def __init__(self, mailhost, fromaddr, toaddrs, subject, capacity):
logging.handlers.BufferingHandler.__init__(self, capacity)
self.mailhost = mailhost
self.mailport = None
self.fromaddr = fromaddr
self.toaddrs = toaddrs
self.subject = subject
self.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s"))
def flush(self):
if len(self.buffer) > 0:
try:
import smtplib
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" % (self.fromaddr, string.join(self.toaddrs, ","), self.subject)
for record in self.buffer:
s = self.format(record)
print s
msg = msg + s + "\r\n"
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except:
self.handleError(None) # no particular record
self.buffer = []
def test():
logger = logging.getLogger("")
logger.setLevel(logging.DEBUG)
logger.addHandler(BufferingSMTPHandler(MAILHOST, FROM, TO, SUBJECT, 10))
for i in xrange(102):
logger.info("Info index = %d", i)
logging.shutdown()
if __name__ == "__main__":
test() | gpl-2.0 |
kooksee/TIOT | test/project/src/app/proto/protocol/XBeeProtocol.py | 1 | 11235 | # encoding=utf-8
import binascii
import json
from twisted.internet.protocol import Protocol
from app.proto.controller.XbeeController import XBeeController
class XBeeProtocol(Protocol):
def __init__(self):
self.ip = ''
self.port = ''
def connectionMade(self):
#import socket
#self.transport.socket._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.ip = str(self.transport.client[0])
self.port = str(self.transport.client[1])
self.factory.numProtocols += 1
print 'conn build From ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.divName = self.ip +":"+ self.port+"##"+self.__class__.__name__
# self.divName = repr(self) + "##" + self.__class__.__name__
self.factory.controller.add_client(self.divName, self.transport)
return
def connectionLost(self, reason):
print 'conn lost reason --> '+str(reason)
self.factory.numProtocols -= 1
print 'conn lost. ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.factory.controller.del_client(self.divName)
return
def dataReceived(self, data):
xbeeController = XBeeController()
# print 'recv data from'+self.divName + "\n" + binascii.b2a_hex(data)
print 'recv data from ip:' + self.ip + ' port:' + self.port + ' data:' + "\n" + binascii.b2a_hex(data)
kdiv = self.factory.controller.online_session
for div in kdiv:
if div == self.divName:
print "设备" + div + "正在把数据-->"
data1 = binascii.b2a_hex(data)
print data1
data2 = xbeeController.getPackets(data1).get_import_data()
for div in kdiv:
#print binascii.b2a_hex(data)
# print div.split("##")[-1]," ",self.__class__.__name__
if div.split("##")[-1] == "LightProtocol":
if data2[0].get("hot") or data2[0].get("smog"):
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 03 00 00 00 00 2a'
data_hex = str(bytearray.fromhex(data_hex))
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 03 00 00 00 00 79'
data_hex1 = str(bytearray.fromhex(data_hex1))
print data_hex
kdiv[div].write(data_hex)
kdiv[div].write(data_hex1)
if div.split("##")[-1] == self.__class__.__name__:
# data = xbeeController.getPackets(
# "7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 66 66 A6 41 02 00 02 00 1F 85 17 42 44").get_import_data()
# print data
str_data = json.dumps(data2)
print str_data
kdiv[div].write(str_data)
print "传递给:" + div
print "\n"
return
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9F 41 11 22 33 44 11 22 33 44 26
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9E 41 11 22 33 44 11 22 33 44 A7
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9F 41 11 22 33 44 11 22 33 44 A6
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9E 41 11 22 33 44 11 22 33 44 27
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9F 41 11 22 33 44 11 22 33 44 26
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9E 41 11 22 33 44 11 22 33 44 27
# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9E 41 11 22 33 44 11 22 33 44 A7
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9F 41 11 22 33 44 11 22 33 44 A6
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 CB 88 BB 54 DD
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 BC 41 11 22 33 44 11 22 33 44 09
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 BE 41 11 22 33 44 11 22 33 44 87
# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83
# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 B4 41 11 22 33 44 11 22 33 44 11
# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 B7 41 11 22 33 44 11 22 33 44 0E
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 E9 88 BB 54 BF
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AA 42 82
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AA 42 82
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B2 41 7B
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B2 41 FB
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 8A C1 23
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 07 89 BB 54 A0
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AE 41 FF
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 FA 40 34
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B5 41 F8
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AE 41 FF
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 25 89 BB 54 82
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AC 41 01
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AC 41 01
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 43 89 BB 54 64
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 61 89 BB 54 46
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 5C 8F 0A 42 D4
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 7F 89 BB 54 28
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B0 41 FD
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 28 41 05
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A8 C1 85
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 5C 8F 0A 42 D4
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A1 C1 8C
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A3 C1 8A
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 9D 89 BB 54 0A
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 9D C1 90
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 A5 C1 08
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 BB 89 BB 54 EC
# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD
| gpl-2.0 |
ruthger/Archipel | ArchipelAgent/archipel-agent-action-scheduler/archipelagentactionscheduler/__init__.py | 5 | 2236 | # -*- coding: utf-8 -*-
#
# __init__.py
#
# Copyright (C) 2010 Antoine Mercadal <[email protected]>
# Copyright, 2011 - Franck Villaume <[email protected]>
# This file is part of ArchipelProject
# http://archipelproject.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import actionscheduler
def make_archipel_plugin(configuration, entity, group):
"""
This function is the plugin factory. It will be called by the object the
plugin is plugged into. It must return a list with at least one dictionary
containing a key for the plugin information, and a key for the plugin object.
@type configuration: Config Object
@param configuration: the general configuration object
@type entity: L{TNArchipelEntity}
@param entity: the entity that has load the plugin
@type group: string
@param group: the entry point group name in which the plugin has been loaded
@rtype: array
@return: array of dictionaries containing the plugin information and objects
"""
return [{"info": actionscheduler.TNActionScheduler.plugin_info(),
"plugin": actionscheduler.TNActionScheduler(configuration, entity, group)}]
def version():
"""
This function can be called by running runarchipel -v in order to get the
version of the installed plugin. You should only have to change the egg name.
@rtype: tuple
@return: tuple containing the package name and the version
"""
import pkg_resources
return (__name__, pkg_resources.get_distribution("archipel-agent-action-scheduler").version, [actionscheduler.TNActionScheduler.plugin_info()]) | agpl-3.0 |
LeandroRoberto/sapl | sapl/relatorios/templates/pdf_pauta_sessao_preparar_pysc.py | 1 | 9023 | import os
request=context.REQUEST
response=request.RESPONSE
session= request.SESSION
if context.REQUEST['data']!='':
dat_inicio_sessao = context.REQUEST['data']
pauta = [] # list that will hold the order-of-the-day agenda to be printed
data = context.pysc.data_converter_pysc(dat_inicio_sessao) # converts the date to yyyy/mm/dd format
codigo = context.REQUEST['cod_sessao_plen']
# select the matters that make up the agenda on the chosen date
for sessao in context.zsql.sessao_plenaria_obter_zsql(dat_inicio_sessao=data, cod_sessao_plen=codigo, ind_excluido=0):
inf_basicas_dic = {} # dictionary that will store the basic information of the plenary session
# select the plenary session type
tipo_sessao = context.zsql.tipo_sessao_plenaria_obter_zsql(tip_sessao=sessao.tip_sessao,ind_excluido=0)[0]
inf_basicas_dic["nom_sessao"] = tipo_sessao.nom_sessao
inf_basicas_dic["num_sessao_plen"] = sessao.num_sessao_plen
inf_basicas_dic["nom_sessao"] = tipo_sessao.nom_sessao
inf_basicas_dic["num_legislatura"] = sessao.num_legislatura
inf_basicas_dic["num_sessao_leg"] = sessao.num_sessao_leg
inf_basicas_dic["dat_inicio_sessao"] = sessao.dat_inicio_sessao
inf_basicas_dic["hr_inicio_sessao"] = sessao.hr_inicio_sessao
inf_basicas_dic["dat_fim_sessao"] = sessao.dat_fim_sessao
inf_basicas_dic["hr_fim_sessao"] = sessao.hr_fim_sessao
# List of the Expediente matters, including their processing (tramitação) status
lst_expediente_materia=[]
for expediente_materia in context.zsql.votacao_expediente_materia_obter_zsql(dat_ordem=data,cod_sessao_plen=codigo,ind_excluido=0):
# select the details of a matter
materia = context.zsql.materia_obter_zsql(cod_materia=expediente_materia.cod_materia)[0]
dic_expediente_materia = {}
dic_expediente_materia["num_ordem"] = expediente_materia.num_ordem
dic_expediente_materia["id_materia"] = materia.sgl_tipo_materia+" - "+materia.des_tipo_materia+" No. "+str(materia.num_ident_basica)+"/"+str(materia.ano_ident_basica)
dic_expediente_materia["txt_ementa"] = materia.txt_ementa
dic_expediente_materia["ordem_observacao"] = expediente_materia.ordem_observacao
dic_expediente_materia["des_numeracao"]=""
numeracao = context.zsql.numeracao_obter_zsql(cod_materia=expediente_materia.cod_materia)
if len(numeracao):
numeracao = numeracao[0]
dic_expediente_materia["des_numeracao"] = str(numeracao.num_materia)+"/"+str(numeracao.ano_materia)
dic_expediente_materia["nom_autor"] = ''
autoria = context.zsql.autoria_obter_zsql(cod_materia=expediente_materia.cod_materia, ind_primeiro_autor=1)
if len(autoria) > 0: # if an author exists
autoria = autoria[0]
autor = context.zsql.autor_obter_zsql(cod_autor=autoria.cod_autor)
if len(autor) > 0:
autor = autor[0]
if autor.des_tipo_autor == "Parlamentar":
parlamentar = context.zsql.parlamentar_obter_zsql(cod_parlamentar=autor.cod_parlamentar)[0]
dic_expediente_materia["nom_autor"] = parlamentar.nom_parlamentar
elif autor.des_tipo_autor == "Comissao":
comissao = context.zsql.comissao_obter_zsql(cod_comissao=autor.cod_comissao)[0]
dic_expediente_materia["nom_autor"] = comissao.nom_comissao
else:
dic_expediente_materia["nom_autor"] = autor.nom_autor
dic_expediente_materia["des_turno"]=""
dic_expediente_materia["des_situacao"] = ""
tramitacao = context.zsql.tramitacao_obter_zsql(cod_materia=expediente_materia.cod_materia, ind_ult_tramitacao=1)
if len(tramitacao):
tramitacao = tramitacao[0]
if tramitacao.sgl_turno != "":
for turno in [("P","Primeiro"), ("S","Segundo"), ("U","Único"), ("F","Final"), ("L","Suplementar"), ("A","Votação Única em Regime de Urgência"), ("B","1ª Votação"), ("C","2ª e 3ª Votações")]:
if tramitacao.sgl_turno == turno[0]:
dic_expediente_materia["des_turno"] = turno[1]
dic_expediente_materia["des_situacao"] = tramitacao.des_status
if dic_expediente_materia["des_situacao"]==None:
dic_expediente_materia["des_situacao"] = " "
lst_expediente_materia.append(dic_expediente_materia)
# List of the Ordem do Dia matters, including their processing (tramitação) status
lst_votacao=[]
for votacao in context.zsql.votacao_ordem_dia_obter_zsql(dat_ordem=data,cod_sessao_plen=codigo,ind_excluido=0):
# select the details of a matter
materia = context.zsql.materia_obter_zsql(cod_materia=votacao.cod_materia)[0]
dic_votacao = {}
dic_votacao["num_ordem"] = votacao.num_ordem
dic_votacao["id_materia"] = materia.sgl_tipo_materia+" - "+materia.des_tipo_materia+" No. "+str(materia.num_ident_basica)+"/"+str(materia.ano_ident_basica)
dic_votacao["txt_ementa"] = materia.txt_ementa
dic_votacao["ordem_observacao"] = votacao.ordem_observacao
dic_votacao["des_numeracao"]=""
numeracao = context.zsql.numeracao_obter_zsql(cod_materia=votacao.cod_materia)
if len(numeracao):
numeracao = numeracao[0]
dic_votacao["des_numeracao"] = str(numeracao.num_materia)+"/"+str(numeracao.ano_materia)
dic_votacao["nom_autor"] = ''
autoria = context.zsql.autoria_obter_zsql(cod_materia=votacao.cod_materia, ind_primeiro_autor=1)
if len(autoria) > 0: # if an author exists
autoria = autoria[0]
autor = context.zsql.autor_obter_zsql(cod_autor=autoria.cod_autor)
if len(autor) > 0:
autor = autor[0]
if autor.des_tipo_autor == "Parlamentar":
parlamentar = context.zsql.parlamentar_obter_zsql(cod_parlamentar=autor.cod_parlamentar)[0]
dic_votacao["nom_autor"] = parlamentar.nom_parlamentar
elif autor.des_tipo_autor == "Comissao":
comissao = context.zsql.comissao_obter_zsql(cod_comissao=autor.cod_comissao)[0]
dic_votacao["nom_autor"] = comissao.nom_comissao
else:
dic_votacao["nom_autor"] = autor.nom_autor
dic_votacao["des_turno"]=""
dic_votacao["des_situacao"] = ""
tramitacao = context.zsql.tramitacao_obter_zsql(cod_materia=votacao.cod_materia, ind_ult_tramitacao=1)
if len(tramitacao):
tramitacao = tramitacao[0]
if tramitacao.sgl_turno != "":
for turno in [("P","Primeiro"), ("S","Segundo"), ("U","Único"), ("L","Suplementar"), ("A","Votação Única em Regime de Urgência"), ("B","1ª Votação"), ("C","2ª e 3ª Votações")]:
if tramitacao.sgl_turno == turno[0]:
dic_votacao["des_turno"] = turno[1]
dic_votacao["des_situacao"] = tramitacao.des_status
if dic_votacao["des_situacao"]==None:
dic_votacao["des_situacao"] = " "
lst_votacao.append(dic_votacao)
# fetch the legislative house properties to build the page header and footer
cabecalho={}
# try to fetch the house logo (LOGO_CASA)
if hasattr(context.sapl_documentos.props_sapl,'logo_casa.gif'):
imagem = context.sapl_documentos.props_sapl['logo_casa.gif'].absolute_url()
else:
imagem = context.imagens.absolute_url() + "/brasao_transp.gif"
# Below, the dict for the page footer is built (line 7)
casa={}
aux=context.sapl_documentos.props_sapl.propertyItems()
for item in aux:
casa[item[0]]=item[1]
localidade=context.zsql.localidade_obter_zsql(cod_localidade=casa["cod_localidade"])
data_emissao= DateTime().strftime("%d/%m/%Y")
rodape= casa
rodape['data_emissao']= data_emissao
inf_basicas_dic['nom_camara']= casa['nom_casa']
REQUEST=context.REQUEST
for local in context.zsql.localidade_obter_zsql(cod_localidade = casa['cod_localidade']):
rodape['nom_localidade']= " "+local.nom_localidade
rodape['sgl_uf']= local.sgl_uf
sessao=session.id
caminho = context.pdf_pauta_sessao_gerar(rodape, sessao, imagem, inf_basicas_dic, lst_votacao, lst_expediente_materia)
if caminho=='aviso':
return response.redirect('mensagem_emitir_proc')
else:
response.redirect(caminho)
| gpl-3.0 |
dwaynebailey/pootle | pootle/apps/pootle_word/utils.py | 5 | 2811 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import re
import Levenshtein
import translate
from django.utils.functional import cached_property
from pootle.core.delegate import stemmer, stopwords
class Stopwords(object):
@cached_property
def words(self):
ttk_path = translate.__path__[0]
fpath = (
os.path.join(ttk_path, "share", "stoplist-en")
if "share" in os.listdir(ttk_path)
else os.path.join(ttk_path, "..", "share", "stoplist-en"))
words = set()
with open(fpath) as f:
for line in f.read().split("\n"):
if not line:
continue
if line[0] in "<>=@":
words.add(line[1:].strip().lower())
return words
class TextStemmer(object):
def __init__(self, context):
self.context = context
def split(self, words):
return re.split(u"[^\w'-]+", words)
@property
def stopwords(self):
return stopwords.get().words
@property
def tokens(self):
return [
t.lower()
for t
in self.split(self.text)
if (len(t) > 2
and t.lower() not in self.stopwords)]
@property
def text(self):
return self.context.source_f
@property
def stemmer(self):
return stemmer.get()
@property
def stems(self):
return self.get_stems(self.tokens)
def get_stems(self, tokens):
return set(self.stemmer(t) for t in tokens)
class TextComparison(TextStemmer):
@property
def text(self):
return self.context
def jaccard_similarity(self, other):
return (
len(other.stems.intersection(self.stems))
/ float(len(set(other.stems).union(self.stems))))
def levenshtein_distance(self, other):
return (
Levenshtein.distance(self.text, other.text)
/ max(len(self.text), len(other.text)))
def tokens_present(self, other):
return (
len(set(self.tokens).intersection(other.tokens))
/ float(len(other.tokens)))
def stems_present(self, other):
return (
len(set(self.stems).intersection(other.stems))
/ float(len(other.stems)))
def similarity(self, other):
other = self.__class__(other)
return (
(self.jaccard_similarity(other)
+ self.levenshtein_distance(other)
+ self.tokens_present(other)
+ self.stems_present(other))
/ 4)
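# Illustrative usage sketch (assumes the `stemmer` and `stopwords` delegates
# used above are registered, which normally happens inside a running Pootle
# instance rather than standalone):
#
#     comparison = TextComparison("translate this string")
#     score = comparison.similarity("translate that string")
#     # `score` averages the Jaccard, Levenshtein and token/stem overlap measures.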
| gpl-3.0 |
yize/grunt-tps | tasks/lib/python/Lib/python2.7/distutils/command/upload.py | 176 | 7002 | """distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
import os
import socket
import platform
from urllib2 import urlopen, Request, HTTPError
from base64 import standard_b64encode
import urlparse
import cStringIO as StringIO
from hashlib import md5
from distutils.errors import DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protocol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + standard_b64encode(self.username + ":" +
self.password)
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if isinstance(value, tuple):
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
headers = {'Content-type':
'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
if self.show_response:
msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
return
except HTTPError, e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (status, reason),
log.ERROR)
| mit |
fitermay/intellij-community | python/lib/Lib/_threading_local.py | 91 | 6946 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if (args or kw) and cls.__init__ is object.__init__:
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call our __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__(self):
import threading
key = object.__getattribute__(self, '_local__key')
try:
threads = list(threading.enumerate())
except:
# If enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up.
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace.
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
from threading import currentThread, RLock
| apache-2.0 |
virgree/odoo | addons/l10n_uy/__openerp__.py | 260 | 1807 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <[email protected]>
# Proyecto de Localización de OperERP para Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Uruguay - Chart of Accounts',
'version': '0.1',
'author': 'Uruguay l10n Team & Guillem Barba',
'category': 'Localization/Account Charts',
'website': 'https://launchpad.net/openerp-uruguay',
'description': """
General Chart of Accounts.
==========================
Provide Templates for Chart of Accounts, Taxes for Uruguay.
""",
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'account_types.xml',
'taxes_code_template.xml',
'account_chart_template.xml',
'taxes_template.xml',
'l10n_uy_wizard.xml',
],
'demo': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
simonwydooghe/ansible | test/units/modules/storage/netapp/test_na_ontap_nvme_namespace.py | 48 | 7361 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests ONTAP Ansible module: na_ontap_nvme_namespace'''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_nvme_namespace \
import NetAppONTAPNVMENamespace as my_module
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None):
''' save arguments '''
self.type = kind
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'namespace':
xml = self.build_namespace_info()
elif self.type == 'quota_fail':
raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
self.xml_out = xml
return xml
@staticmethod
def build_namespace_info():
''' build xml data for namespace-info '''
xml = netapp_utils.zapi.NaElement('xml')
data = {'num-records': 2,
'attributes-list': [{'nvme-namespace-info': {'path': 'abcd/vol'}},
{'nvme-namespace-info': {'path': 'xyz/vol'}}]}
xml.translate_struct(data)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.onbox = False
def set_default_args(self):
if self.onbox:
hostname = '10.193.75.3'
username = 'admin'
password = 'netapp1!'
vserver = 'ansible'
ostype = 'linux'
path = 'abcd/vol'
size = 20
else:
hostname = 'hostname'
username = 'username'
password = 'password'
vserver = 'vserver'
ostype = 'linux'
path = 'abcd/vol'
size = 20
return dict({
'hostname': hostname,
'username': username,
'password': password,
'ostype': ostype,
'vserver': vserver,
'path': path,
'size': size
})
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_ensure_get_called(self):
''' test get_namespace() for non-existent namespace'''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = self.server
assert my_obj.get_namespace() is None
def test_ensure_get_called_existing(self):
''' test get_namespace() for existing namespace'''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = MockONTAPConnection(kind='namespace')
assert my_obj.get_namespace()
@patch('ansible.modules.storage.netapp.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.create_namespace')
def test_successful_create(self, create_namespace):
''' creating namespace and testing idempotency '''
set_module_args(self.set_default_args())
my_obj = my_module()
if not self.onbox:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert exc.value.args[0]['changed']
create_namespace.assert_called_with()
# to reset na_helper from remembering the previous 'changed' value
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('namespace')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert not exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.delete_namespace')
def test_successful_delete(self, delete_namespace):
''' deleting namespace and testing idempotency '''
data = self.set_default_args()
data['state'] = 'absent'
set_module_args(data)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('namespace')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert exc.value.args[0]['changed']
delete_namespace.assert_called_with()
# to reset na_helper from remembering the previous 'changed' value
my_obj = my_module()
if not self.onbox:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert not exc.value.args[0]['changed']
def test_if_all_methods_catch_exception(self):
module_args = {}
module_args.update(self.set_default_args())
set_module_args(module_args)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('quota_fail')
with pytest.raises(AnsibleFailJson) as exc:
my_obj.get_namespace()
assert 'Error fetching namespace info:' in exc.value.args[0]['msg']
with pytest.raises(AnsibleFailJson) as exc:
my_obj.create_namespace()
assert 'Error creating namespace for path' in exc.value.args[0]['msg']
with pytest.raises(AnsibleFailJson) as exc:
my_obj.delete_namespace()
assert 'Error deleting namespace for path' in exc.value.args[0]['msg']
| gpl-3.0 |
icereval/osf.io | api/requests/serializers.py | 1 | 3573 | from django.db import IntegrityError
from rest_framework import exceptions
from rest_framework import serializers as ser
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse, get_user_auth
from api.base.serializers import JSONAPISerializer, LinksField, VersionedDateTimeField, RelationshipField
from osf.models import NodeRequest
from osf.utils.workflows import DefaultStates, RequestTypes
class NodeRequestSerializer(JSONAPISerializer):
class Meta:
type_ = 'node-requests'
filterable_fields = frozenset([
'creator',
'request_type',
'machine_state',
'created',
'id'
])
id = ser.CharField(source='_id', read_only=True)
request_type = ser.ChoiceField(read_only=True, required=False, choices=RequestTypes.choices())
machine_state = ser.ChoiceField(read_only=True, required=False, choices=DefaultStates.choices())
comment = ser.CharField(required=False, allow_blank=True, max_length=65535)
created = VersionedDateTimeField(read_only=True)
modified = VersionedDateTimeField(read_only=True)
date_last_transitioned = VersionedDateTimeField(read_only=True)
target = RelationshipField(
read_only=True,
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<target._id>'},
filter_key='target___id',
)
creator = RelationshipField(
read_only=True,
related_view='users:user-detail',
related_view_kwargs={'user_id': '<creator._id>'},
filter_key='creator___id',
)
links = LinksField({
'self': 'get_absolute_url',
'target': 'get_target_url'
})
def get_absolute_url(self, obj):
return absolute_reverse('requests:node-request-detail', kwargs={'request_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})
def get_target_url(self, obj):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': obj.target._id, 'version': self.context['request'].parser_context['kwargs']['version']})
def create(self, validated_data):
raise NotImplementedError()
class NodeRequestCreateSerializer(NodeRequestSerializer):
request_type = ser.ChoiceField(required=True, choices=RequestTypes.choices())
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
if not auth.user:
raise exceptions.PermissionDenied
try:
node = self.context['view'].get_node()
except exceptions.PermissionDenied:
node = self.context['view'].get_node(check_object_permissions=False)
if auth.user in node.contributors:
raise exceptions.PermissionDenied('You cannot request access to a node you contribute to.')
raise
comment = validated_data.pop('comment', '')
request_type = validated_data.pop('request_type', None)
if not request_type:
raise exceptions.ValidationError('You must specify a valid request_type.')
try:
node_request = NodeRequest.objects.create(
target=node,
creator=auth.user,
comment=comment,
machine_state=DefaultStates.INITIAL.value,
request_type=request_type
)
node_request.save()
except IntegrityError:
raise Conflict('Users may not have more than one {} request per node.'.format(request_type))
node_request.run_submit(auth.user)
return node_request
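# Illustrative request sketch (added for clarity; the URL pattern and the
# "access" request_type value here are assumptions, not taken from this file):
#
#     POST /v2/nodes/<node_id>/requests/
#     {
#         "data": {
#             "type": "node-requests",
#             "attributes": {
#                 "comment": "Please let me in",
#                 "request_type": "access"
#             }
#         }
#     }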
| apache-2.0 |
shsingh/ansible | lib/ansible/modules/database/postgresql/postgresql_ext.py | 2 | 13576 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database
description:
- Add or remove PostgreSQL extensions from a database.
version_added: '1.9'
options:
name:
description:
- Name of the extension to add or remove.
required: true
type: str
aliases:
- ext
db:
description:
- Name of the database to add or remove the extension to/from.
required: true
type: str
aliases:
- login_db
schema:
description:
- Name of the schema to add the extension to.
version_added: '2.8'
type: str
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
version_added: '2.8'
state:
description:
- The database extension state.
default: present
choices: [ absent, present ]
type: str
cascade:
description:
- Automatically install/remove any extensions that this extension depends on
that are not already installed/removed (supported since PostgreSQL 9.6).
type: bool
default: no
version_added: '2.8'
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
version_added: '2.8'
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
version_added: '2.8'
version:
description:
- Extension version to add or update to. Has effect with I(state=present) only.
- If not specified, the latest extension version will be created.
- It can't downgrade an extension version.
When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
- Set I(version=latest) to update the extension to the latest available version.
type: str
version_added: '2.9'
seealso:
- name: PostgreSQL extensions
description: General information about PostgreSQL extensions.
link: https://www.postgresql.org/docs/current/external-extensions.html
- name: CREATE EXTENSION reference
description: Complete reference of the CREATE EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-createextension.html
- name: ALTER EXTENSION reference
description: Complete reference of the ALTER EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterextension.html
- name: DROP EXTENSION reference
description: Complete reference of the DROP EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-dropextension.html
notes:
- The default authentication assumes that you are either logging in as
or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter.
- You must ensure that C(psycopg2) is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case),
then PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Daniel Schep (@dschep)
- Thomas O'Donnell (@andytom)
- Sandro Santilli (@strk)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Adds postgis extension to the database acme in the schema foo
postgresql_ext:
name: postgis
db: acme
schema: foo
- name: Removes postgis extension to the database acme
postgresql_ext:
name: postgis
db: acme
state: absent
- name: Adds earthdistance extension to the database template1 cascade
postgresql_ext:
name: earthdistance
db: template1
cascade: true
# In the example below, if earthdistance extension is installed,
# it will be removed too because it depends on cube:
- name: Removes cube extension from the database acme cascade
postgresql_ext:
name: cube
db: acme
cascade: yes
state: absent
- name: Create extension foo of version 1.2 or update it if it's already created
postgresql_ext:
db: acme
name: foo
version: 1.2
- name: Assuming extension foo is created, update it to the latest version
postgresql_ext:
db: acme
name: foo
version: latest
'''
RETURN = r'''
query:
description: List of executed queries.
returned: always
type: list
sample: ["DROP EXTENSION \"acme\""]
'''
import traceback
from distutils.version import LooseVersion
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
executed_queries = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext, cascade):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
if cascade:
query += " CASCADE"
cursor.execute(query)
executed_queries.append(query)
return True
else:
return False
def ext_update_version(cursor, ext, version):
"""Update extension version.
Return True if success.
Args:
cursor (cursor) -- cursor object of psycopg2 library
ext (str) -- extension name
version (str) -- extension version
"""
if version != 'latest':
query = ("ALTER EXTENSION \"%s\"" % ext)
cursor.execute(query + " UPDATE TO %(ver)s", {'ver': version})
executed_queries.append(cursor.mogrify(query + " UPDATE TO %(ver)s", {'ver': version}))
else:
query = ("ALTER EXTENSION \"%s\" UPDATE" % ext)
cursor.execute(query)
executed_queries.append(query)
return True
def ext_create(cursor, ext, schema, cascade, version):
query = "CREATE EXTENSION \"%s\"" % ext
if schema:
query += " WITH SCHEMA \"%s\"" % schema
if version:
query += " VERSION %(ver)s"
if cascade:
query += " CASCADE"
if version:
cursor.execute(query, {'ver': version})
executed_queries.append(cursor.mogrify(query, {'ver': version}))
else:
cursor.execute(query)
executed_queries.append(query)
return True
def ext_get_versions(cursor, ext):
"""
Get the currently created extension version and the available versions.
Return tuple (current_version, [list of available versions]).
Note: the list of available versions contains only versions
that are higher than the currently created version.
If the extension is not created, this list will contain all
available versions.
Args:
cursor (cursor) -- cursor object of psycopg2 library
ext (str) -- extension name
"""
# 1. Get the current extension version:
query = ("SELECT extversion FROM pg_catalog.pg_extension "
"WHERE extname = %(ext)s")
current_version = '0'
cursor.execute(query, {'ext': ext})
res = cursor.fetchone()
if res:
current_version = res[0]
# 2. Get available versions:
query = ("SELECT version FROM pg_available_extension_versions "
"WHERE name = %(ext)s")
cursor.execute(query, {'ext': ext})
res = cursor.fetchall()
available_versions = []
if res:
# Make the list of available versions:
for line in res:
if LooseVersion(line[0]) > LooseVersion(current_version):
available_versions.append(line['version'])
if current_version == '0':
current_version = False
return (current_version, available_versions)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type="str", required=True, aliases=["login_db"]),
ext=dict(type="str", required=True, aliases=["name"]),
schema=dict(type="str"),
state=dict(type="str", default="present", choices=["absent", "present"]),
cascade=dict(type="bool", default=False),
session_role=dict(type="str"),
version=dict(type="str"),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
ext = module.params["ext"]
schema = module.params["schema"]
state = module.params["state"]
cascade = module.params["cascade"]
version = module.params["version"]
changed = False
if version and state == 'absent':
module.warn("Parameter version is ignored when state=absent")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
try:
# Get extension info and available versions:
curr_version, available_versions = ext_get_versions(cursor, ext)
if state == "present":
if version == 'latest':
if available_versions:
version = available_versions[-1]
else:
version = ''
if version:
# If the specific version is passed and it is not available for update:
if version not in available_versions:
if not curr_version:
module.fail_json(msg="Passed version '%s' is not available" % version)
elif LooseVersion(curr_version) == LooseVersion(version):
changed = False
else:
module.fail_json(msg="Passed version '%s' is lower than "
"the current created version '%s' or "
"the passed version is not available" % (version, curr_version))
# If the specific version is passed and it is higher than the current version:
if curr_version and version:
if LooseVersion(curr_version) < LooseVersion(version):
if module.check_mode:
changed = True
else:
changed = ext_update_version(cursor, ext, version)
# If the specific version is passed and it is created now:
if curr_version == version:
changed = False
# If the ext doesn't exist and installed:
elif not curr_version and available_versions:
if module.check_mode:
changed = True
else:
changed = ext_create(cursor, ext, schema, cascade, version)
# If version is not passed:
else:
if not curr_version:
# If the ext doesn't exist and it's installed:
if available_versions:
if module.check_mode:
changed = True
else:
changed = ext_create(cursor, ext, schema, cascade, version)
# If the ext doesn't exist and not installed:
else:
module.fail_json(msg="Extension %s is not installed" % ext)
elif state == "absent":
if curr_version:
if module.check_mode:
changed = True
else:
changed = ext_delete(cursor, ext, cascade)
else:
changed = False
except Exception as e:
db_connection.close()
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
db_connection.close()
module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
if __name__ == '__main__':
main()
| gpl-3.0 |
DayGitH/Python-Challenges | DailyProgrammer/DP20160323B.py | 1 | 5877 | """
[2016-03-23] Challenge #259 [Intermediate] Mahjong Hands
https://www.reddit.com/r/dailyprogrammer/comments/4bmdwz/20160323_challenge_259_intermediate_mahjong_hands/
# Description
You are the biggest, baddest mahjong player around. Your enemies tremble at your presence on the battlefield, and you
can barely walk ten steps before a fan begs you for an autograph.
However, you have a dark secret that would ruin you if it ever came to light. You're terrible at determining whether a
hand is a winning hand. For now, you've been able to bluff and bluster your way, but you know that one day you won't be
able to get away with it.
As such, you've decided to write a program to assist you!
## Further Details
Mahjong (not to be confused with [mahjong solitaire](http://en.wikipedia.org/wiki/Mahjong_solitaire)) is a game where
hands are composed from combinations of tiles. There are a number of variants of mahjong, but for this challenge, we
will consider a simplified variant of Japanese Mahjong which is also known as Riichi Mahjong.
## Basic Version
There are three suits in this variant, "Bamboo", "Circle" and "Character". Every tile that belongs to these suits has a
value that ranges from 1 - 9.
To complete a hand, tiles are organised into groups. If every tile in a hand belongs to a single group (and each tile
can only be used once), the hand is a winning hand.
For now, we shall consider the groups "Pair", "Set" and "Sequence". They are composed as follows:
Pair - Two tiles with the same suit and value
Set - Three tiles with the same suit and value
Sequence - Three tiles with the same suit, and which increment in value, such as "Circle 2, Circle 3, Circle 4". There
is no value wrapping so "Circle 9, Circle 1, Circle 2" would not be considered valid.
A hand is composed of 14 tiles.
## Bonus 1 - Adding Quads
There is actually a fourth group called a "Quad". It is just like a pair and a set, except it is composed of four tiles.
What makes this group special is that a hand containing quads will actually have a hand larger than 14, 1 for every
quad. This is fine, as long as there is *1, and only 1 pair*.
## Bonus 2 - Adding Honour Tiles
In addition to the tiles belonging to the three suits, there are 7 additional tiles. These tiles have no value, and are
collectively known as "honour" tiles.
As they have no value, they cannot be members of a sequence. Furthermore, they can only be part of a set or pair with
tiles that are exactly the same. For example, "Red Dragon, Red Dragon, Red Dragon" would be a valid set, but "Red
Dragon, Green Dragon, Red Dragon" would not.
These additional tiles are:
* Green Dragon
* Red Dragon
* White Dragon
* North Wind
* East Wind
* South Wind
* West Wind
## Bonus 3 - Seven Pairs
There are a number of special hands that are an exception to the above rules. One such hand is "Seven Pairs". As the
name suggests, it is a hand composed of seven pairs.
# Formal Inputs & Outputs
## Input description
### Basic
You will be provided with N on a single line, followed by N lines of the following format:
<tile suit>,<value>
### Bonus 2
In addition, the lines may be of the format:
<honour tile>
## Output description
You should output whether the hand is a winning hand or not.
# Sample Inputs and Outputs
## Sample Input (Standard)
14
Circle,4
Circle,5
Circle,6
Bamboo,1
Bamboo,2
Bamboo,3
Character,2
Character,2
Character,2
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
## Sample Output (Standard)
Winning hand
## Sample Input (Standard)
14
Circle,4
Bamboo,1
Circle,5
Bamboo,2
Character,2
Bamboo,3
Character,2
Circle,6
Character,2
Circle,1
Bamboo,8
Circle,1
Bamboo,7
Bamboo,9
## Sample Output (Standard)
Winning hand
## Sample Input (Standard)
14
Circle,4
Circle,5
Circle,6
Circle,4
Circle,5
Circle,6
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
Circle,4
Circle,5
Circle,6
## Sample Output (Standard)
Winning hand
## Sample Input (Bonus 1)
15
Circle,4
Circle,5
Circle,6
Bamboo,1
Bamboo,2
Bamboo,3
Character,2
Character,2
Character,2
Character,2
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
## Sample Output (Bonus 1)
Winning hand
## Sample Input (Bonus 1)
16
Circle,4
Circle,5
Circle,6
Bamboo,1
Bamboo,2
Bamboo,3
Character,2
Character,2
Character,2
Character,2
Circle,1
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
## Sample Output (Bonus 1)
Not a winning hand
## Sample Input (Bonus 2)
14
Circle,4
Circle,5
Circle,6
Bamboo,1
Bamboo,2
Bamboo,3
Red Dragon
Red Dragon
Red Dragon
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
## Sample Output (Bonus 2)
Winning hand
## Sample Input (Bonus 2)
14
Circle,4
Circle,5
Circle,6
Bamboo,1
Bamboo,2
Bamboo,3
Red Dragon
Green Dragon
White Dragon
Circle,1
Circle,1
Bamboo,7
Bamboo,8
Bamboo,9
## Sample Output (Bonus 2)
Not a winning hand
## Sample Input (Bonus 3)
14
Circle,4
Circle,4
Character,5
Character,5
Bamboo,5
Bamboo,5
Circle,5
Circle,5
Circle,7
Circle,7
Circle,9
Circle,9
Circle,9
Circle,9
## Sample Output (Bonus 3)
Winning hand
# Notes
None of the bonus components depend on each other, and can be implemented in any order. The test cases do not presume
completion of earlier bonus components. The order is just the recommended implementation order.
Many thanks to Redditor /u/oketa for this submission to /r/dailyprogrammer_ideas. If you have any ideas, please submit
them there!
"""
def main():
pass
if __name__ == "__main__":
main()
| mit |
waldocarter/p2pool | nattraverso/pynupnp/soap.py | 288 | 3547 | """
This module is a SOAP client using twisted's deferreds.
It uses the SOAPpy package.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{[email protected]<mailto:[email protected]>}
@version: 0.1.0
"""
__revision__ = "$id"
import SOAPpy, logging
from SOAPpy.Config import Config
from twisted.web import client, error
#General config
Config.typed = False
class SoapError(Exception):
"""
This is a SOAP error message, not an HTTP error message.
The content of this error is a SOAPpy structure representing the
SOAP error message.
"""
pass
class SoapProxy:
"""
Proxy for an url to which we send SOAP rpc calls.
"""
def __init__(self, url, prefix):
"""
Init the proxy, it will connect to the given url, using the
given soap namespace.
@param url: The url of the remote host to call
@param prefix: The namespace prefix to use, eg.
'urn:schemas-upnp-org:service:WANIPConnection:1'
"""
logging.debug("Soap Proxy: '%s', prefix: '%s'", url, prefix)
self._url = url
self._prefix = prefix
def call(self, method, **kwargs):
"""
Call the given remote method with the given arguments, as keywords.
Returns a deferred, called with SOAPpy structure representing
the soap response.
@param method: The method name to call, eg. 'GetExternalIP'
@param kwargs: The parameters of the call, as keywords
@return: A deferred fired with the parsed SOAP response
@rtype: L{twisted.internet.defer.Deferred}
"""
payload = SOAPpy.buildSOAP(method=method, config=Config, namespace=self._prefix, kw=kwargs)
# Here begins the nasty hack
payload = payload.replace(
# Upnp wants s: instead of SOAP-ENV
'SOAP-ENV','s').replace(
# Doesn't seem to like these encoding stuff
'xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"', '').replace(
'SOAP-ENC:root="1"', '').replace(
# And it wants u: instead of ns1 namespace for arguments..
'ns1','u')
logging.debug("SOAP Payload:\n%s", payload)
return client.getPage(self._url, postdata=payload, method="POST",
headers={'content-type': 'text/xml', 'SOAPACTION': '%s#%s' % (self._prefix, method)}
).addCallbacks(self._got_page, self._got_error)
def _got_page(self, result):
"""
The http POST command was successful, we parse the SOAP
answer, and return it.
@param result: the xml content
"""
parsed = SOAPpy.parseSOAPRPC(result)
logging.debug("SOAP Answer:\n%s", result)
logging.debug("SOAP Parsed Answer: %r", parsed)
return parsed
def _got_error(self, res):
"""
The HTTP POST command did not succeed, depending on the error type:
- it's a SOAP error, we parse it and return a L{SoapError}.
- it's another type of error (http, other), we raise it as is
"""
logging.debug("SOAP Error:\n%s", res)
if isinstance(res.value, error.Error):
try:
logging.debug("SOAP Error content:\n%s", res.value.response)
raise SoapError(SOAPpy.parseSOAPRPC(res.value.response)["detail"])
except:
raise
raise Exception(res.value)
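# Illustrative usage sketch (the gateway control URL below is an assumption;
# in practice it comes from UPnP discovery elsewhere in nattraverso):
#
#     proxy = SoapProxy(
#         "http://192.168.1.1:49152/upnp/control/WANIPConn1",
#         "urn:schemas-upnp-org:service:WANIPConnection:1")
#     d = proxy.call("GetExternalIPAddress")
#     d.addCallback(lambda parsed: logging.info("Parsed answer: %r", parsed))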
| gpl-3.0 |
tiagofrepereira2012/tensorflow | tensorflow/python/debug/cli/readline_ui_test.py | 81 | 5646 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the readline-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tempfile
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import readline_ui
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class MockReadlineUI(readline_ui.ReadlineUI):
"""Test subclass of ReadlineUI that bypasses terminal manipulations."""
def __init__(self, on_ui_exit=None, command_sequence=None):
readline_ui.ReadlineUI.__init__(self, on_ui_exit=on_ui_exit)
self._command_sequence = command_sequence
self._command_counter = 0
self.observers = {"screen_outputs": []}
def _get_user_command(self):
command = self._command_sequence[self._command_counter]
self._command_counter += 1
return command
def _display_output(self, screen_output):
self.observers["screen_outputs"].append(screen_output)
class ReadlineTest(test_util.TensorFlowTestCase):
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
parsed = ap.parse_args(args)
lines = ["bar"] * parsed.num_times
return debugger_cli_common.RichTextLines(lines)
def testUIFactoryCreatesReadlineUI(self):
ui = ui_factory.get_ui("readline")
self.assertIsInstance(ui, readline_ui.ReadlineUI)
def testUIFactoryRaisesExceptionOnInvalidUIType(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'foobar'"):
ui_factory.get_ui("foobar")
def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'readline'"):
ui_factory.get_ui("readline", available_ui_types=["curses"])
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.observers["screen_outputs"]))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockReadlineUI(command_sequence=["", "exit"])
ui.run_ui()
self.assertEqual(1, len(ui.observers["screen_outputs"]))
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 60, screen_outputs[0].lines)
def testRunUIWithValidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "babble -n 6", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["bar"] * 6, screen_outputs[1].lines)
def testRunUIWithInvalidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "wobble", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"wobble\""],
screen_outputs[1].lines)
def testRunUIWithOnUIExitCallback(self):
observer = {"callback_invoked": False}
def callback_for_test():
observer["callback_invoked"] = True
ui = MockReadlineUI(on_ui_exit=callback_for_test, command_sequence=["exit"])
self.assertFalse(observer["callback_invoked"])
ui.run_ui()
self.assertEqual(0, len(ui.observers["screen_outputs"]))
self.assertTrue(observer["callback_invoked"])
def testIncompleteRedirectWorks(self):
output_path = tempfile.mktemp()
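    # tempfile.mktemp() is race-prone in general; acceptable here since the test only needs a unique path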
ui = MockReadlineUI(
command_sequence=["babble -n 2 > %s" % output_path, "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 2, screen_outputs[0].lines)
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
RaRe-Technologies/gensim | gensim/test/test_lee.py | 5 | 4277 | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Lee et al. (2005)
Lee et al. (2005) compare different models for semantic
similarity and verify the results with similarity judgements from humans.
As a validation of the gensim implementation we reproduced the results
of Lee et al. (2005) in this test.
Many thanks to Michael D. Lee ([email protected]) who provided us
with his corpus and similarity data.
If you need to reference this dataset, please cite:
Lee, M., Pincombe, B., & Welsh, M. (2005).
An empirical evaluation of models of text document similarity.
Proceedings of the 27th Annual Conference of the Cognitive Science Society
"""
from __future__ import with_statement
import logging
import os.path
import unittest
from functools import partial
import numpy as np
from gensim import corpora, models, utils, matutils
from gensim.parsing.preprocessing import preprocess_documents, preprocess_string, DEFAULT_FILTERS
bg_corpus = None
corpus = None
human_sim_vector = None
class TestLeeTest(unittest.TestCase):
def setUp(self):
"""setup lee test corpora"""
global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
bg_corpus_file = 'lee_background.cor'
corpus_file = 'lee.cor'
sim_file = 'similarities0-1.txt'
# read in the corpora
latin1 = partial(utils.to_unicode, encoding='latin1')
with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:
bg_corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:
corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:
bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:
corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
# read the human similarity data
sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))
sim_m_size = np.shape(sim_matrix)[0]
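        # the similarity matrix is symmetric, so only the strict upper triangle carries information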
human_sim_vector = sim_matrix[np.triu_indices(sim_m_size, 1)]
def test_corpus(self):
"""availability and integrity of corpus"""
documents_in_bg_corpus = 300
documents_in_corpus = 50
len_sim_vector = 1225
self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
self.assertEqual(len(corpus), documents_in_corpus)
self.assertEqual(len(human_sim_vector), len_sim_vector)
def test_lee(self):
"""correlation with human data > 0.6
(this is the value which was achieved in the original paper)
"""
global bg_corpus, corpus
# create a dictionary and corpus (bag of words)
dictionary = corpora.Dictionary(bg_corpus)
bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
corpus = [dictionary.doc2bow(text) for text in corpus]
# transform the bag of words with log_entropy normalization
log_ent = models.LogEntropyModel(bg_corpus)
bg_corpus_ent = log_ent[bg_corpus]
# initialize an LSI transformation from background corpus
lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
# transform small corpus to lsi bow->log_ent->fold-in-lsi
corpus_lsi = lsi[log_ent[corpus]]
# compute pairwise similarity matrix and extract upper triangular
res = np.zeros((len(corpus), len(corpus)))
for i, par1 in enumerate(corpus_lsi):
for j, par2 in enumerate(corpus_lsi):
res[i, j] = matutils.cossim(par1, par2)
flat = res[np.triu_indices(len(corpus), 1)]
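        # Pearson correlation between the model similarities and the human judgements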
cor = np.corrcoef(flat, human_sim_vector)[0, 1]
logging.info("LSI correlation coefficient is %s", cor)
self.assertTrue(cor > 0.6)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| lgpl-2.1 |
vodik/pytest | testing/acceptance_test.py | 4 | 23849 | import sys
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
class TestGeneralUsage:
def test_config_error(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
import pytest
raise pytest.UsageError("hello")
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
result.stderr.fnmatch_lines([
'*ERROR: hello'
])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest("""
def pytest_sessionstart():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*def pytest_sessionstart():*',
'*INTERNALERROR*0 / 0*',
])
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest("""
def pytest_configure():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest("""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
""")
result = testdir.runpytest("-s", "asd")
assert result.ret == 4 # EXIT_USAGEERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines([
"*---configure",
"*---unconfigure",
])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
""")
testdir.makepyfile(test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
""")
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines([
'*1 passed*',
])
def test_assertion_magic(self, testdir):
p = testdir.makepyfile("""
def test_this():
x = 0
assert x
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"> assert x",
"E assert 0",
])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""")
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
#XXX on jython this fails: "> import import_fails",
"E ImportError: No module named *does_not_work*",
])
assert result.ret == 1
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret
result.stderr.fnmatch_lines([
"*ERROR: not found:*%s" %(p2.basename,)
])
def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
testdir.makepyfile("")
testdir.makeconftest("import qwerty")
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines("""
*--version*
*warning*conftest.py*
""")
result = testdir.runpytest()
result.stderr.fnmatch_lines("""
*ERROR*could not load*conftest.py*
""")
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest("""
import pytest
def pytest_collect_directory():
pytest.skip("early")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skip*"
])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.makeconftest("""
import pytest
class MyFile(pytest.File):
def collect(self):
return [MyItem("hello", parent=self)]
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyItem(pytest.Item):
pass
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines([
"*MyFile*test_issue88*",
"*Module*test_issue88*",
])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest("""
import sys
print ("should not be seen")
sys.stderr.write("stder42\\n")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "should not be seen" not in result.stdout.str()
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest("""
print ("should be seen")
assert 0
""")
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
reason="symlink not available on this platform")
def test_chdir(self, testdir):
testdir.tmpdir.join("py").mksymlinkto(py._pydir)
p = testdir.tmpdir.join("main.py")
p.write(_pytest._code.Source("""
import sys, os
sys.path.insert(0, '')
import py
print (py.__file__)
print (py.__path__)
os.chdir(os.path.dirname(os.getcwd()))
print (py.log)
"""))
result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.tmpdir.mkdir("sub1")
sub2 = testdir.tmpdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == EXIT_NOTESTSCOLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest(sub1)
assert result.ret == EXIT_USAGEERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
""")
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest("""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
""")
result = testdir.runpytest(c.basename+"::"+"xyz")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 pass*",
])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_generate_tests(metafunc):
metafunc.addcall({'x': 3}, id='hello-123')
def pytest_runtest_setup(item):
print (item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
""")
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile("""
def pytest_generate_tests(metafunc):
metafunc.addcall({'i': 1}, id="1")
metafunc.addcall({'i': 2}, id="2")
def test_func(i):
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile("""
def test_func():
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest("""
def pytest_configure():
x
""")
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines([
"INTERNAL*pytest_configure*",
"INTERNAL*x*",
])
assert 'sessionstarttime' not in result.stderr.str()
@pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a'])
def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(test_fun="""
def test_a():
pass
def""")
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(['*SyntaxError*'])
if '::' in lookfor:
result.stderr.fnmatch_lines([
'*ERROR*',
])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines([
"*ERROR*test_a.py::a*",
"*ERROR*test_b.py::b*",
])
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
# Ref #383. Python 3.3's namespace package messed with our import hooks
# Importing a module that didn't exist, even if the ImportError was
# gracefully handled, would make our test crash.
testdir.mkdir('not_a_package')
p = testdir.makepyfile("""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
""")
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines("""
*unrecognized*
""")
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile("""
def raise_error(obj):
raise IOError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
""")
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
"*source code not available*",
"*fixture 'invalid_fixture' not found",
])
def test_plugins_given_as_strings(self, tmpdir, monkeypatch):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=['invalid.module'])
assert 'invalid' in str(excinfo.value)
p = tmpdir.join('test_test_plugins_given_as_strings.py')
p.write('def test_foo(): pass')
mod = py.std.types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, 'myplugin', mod)
assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
def test_parameterized_with_bytes_regex(self, testdir):
p = testdir.makepyfile("""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
'*1 passed*'
])
class TestInvocationVariants:
def test_earlyinit(self, testdir):
p = testdir.makepyfile("""
import pytest
assert hasattr(pytest, 'mark')
""")
result = testdir.runpython(p)
assert result.ret == 0
@pytest.mark.xfail("sys.platform.startswith('java')")
def test_pydoc(self, testdir):
for name in ('py.test', 'pytest'):
result = testdir.runpython_c("import %s;help(%s)" % (name, name))
assert result.ret == 0
s = result.stdout.str()
assert 'MarkGenerator' in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile("""
from py.test import *
#collect
#cmdline
#Item
#assert collect.Item is Item
#assert collect.Collector is Collector
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile("""
from pytest import *
#Item
#File
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(run="""
import pytest
pytest.main()
pytest.main()
""")
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"*1 passed*",
"*1 passed*",
])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_string(self, capsys):
retcode = pytest.main("-h")
assert not retcode
out, err = capsys.readouterr()
assert "--help" in out
pytest.raises(ValueError, lambda: pytest.main(0))
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == EXIT_NOTESTSCOLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, testdir, capsys):
class MyPlugin:
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write('raise ImportError')
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
assert result.ret != 0
# FIXME: It would be more natural to match NOT
# "ERROR*file*or*package*not*found*".
result.stdout.fnmatch_lines([
"*collected 0 items*"
])
def test_cmdline_python_package(self, testdir, monkeypatch):
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 passed*"
])
def join_pythonpath(what):
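            # note: ':' is the POSIX path separator; os.pathsep would be the portable choice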
cur = py.std.os.environ.get('PYTHONPATH')
if cur:
return str(what) + ':' + cur
return what
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
path.join('test_hello.py').remove()
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
assert result.ret != 0
result.stderr.fnmatch_lines([
"*not*found*test_hello*",
])
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines([
"ERROR*file*or*package*not*found*",
])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile("""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile('.txt', """
>>> x=3
>>> x
4
""")
result = testdir.runpytest("-rf")
lines = result.stdout.str().splitlines()
for line in lines:
if line.startswith("FAIL "):
testid = line[5:].strip()
break
result = testdir.runpytest(testid, '-rf')
result.stdout.fnmatch_lines([
line,
"*1 failed*",
])
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin('python')
class TestDurations:
source = """
import time
frag = 0.002
def test_something():
pass
def test_2():
time.sleep(frag*5)
def test_1():
time.sleep(frag)
def test_3():
time.sleep(frag*10)
"""
def test_calls(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*durations*",
"*call*test_3*",
"*call*test_2*",
"*call*test_1*",
])
def test_calls_show_2(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0")
assert result.ret == 0
for x in "123":
for y in 'call',: #'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
                raise AssertionError("not found %s %s" % (x, y))
def test_with_deselected(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*durations*",
"*call*test_1*",
])
def test_with_failing_collection(self, testdir):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret != 0
result.stdout.fnmatch_lines([
"*durations*",
"*call*test_1*",
])
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture:
source = """
import time
frag = 0.001
def setup_function(func):
time.sleep(frag * 3)
def test_1():
time.sleep(frag*2)
def test_2():
time.sleep(frag)
"""
def test_setup_function(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random("""
*durations*
* setup *test_1*
* call *test_1*
""")
| mit |
vasyarv/edx-platform | lms/djangoapps/django_comment_client/management/commands/assign_role.py | 251 | 1144 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django_comment_common.models import Role
from django.contrib.auth.models import User
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the role instead of adding it'),
)
args = '<user|email> <role> <course_id>'
    help = 'Assign a discussion forum role to a user'
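    # Illustrative invocation (hypothetical user/role/course values):
    #   ./manage.py assign_role [email protected] Moderator MITx/6.002x/2013_Spring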
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Usage is assign_role {0}'.format(self.args))
name_or_email, role, course_id = args
role = Role.objects.get(name=role, course_id=course_id)
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
if options['remove']:
user.roles.remove(role)
else:
user.roles.add(role)
print 'Success!'
| agpl-3.0 |
thepaul/uftrace | tests/t217_no_libcall_dump.py | 1 | 1525 | #!/usr/bin/env python
from runtest import TestBase
import subprocess as sp
TDIR='xxx'
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'signal', """
uftrace file header: magic = 4674726163652100
uftrace file header: version = 4
uftrace file header: header size = 40
uftrace file header: endian = 1 (little)
uftrace file header: class = 2 (64 bit)
uftrace file header: features = 0x363 (PLTHOOK | TASK_SESSION | SYM_REL_ADDR | MAX_STACK | PERF_EVENT | AUTO_ARGS)
uftrace file header: info = 0x3bff
reading 73755.dat
50895.869952000 73755: [entry] main(400787) depth: 0
50895.869952297 73755: [entry] foo(40071f) depth: 1
50895.869952533 73755: [exit ] foo(40071f) depth: 1
50895.869966333 73755: [entry] sighandler(400750) depth: 2
50895.869966473 73755: [entry] bar(400734) depth: 3
50895.869966617 73755: [exit ] bar(400734) depth: 3
50895.869967067 73755: [exit ] sighandler(400750) depth: 2
50895.869969790 73755: [entry] foo(40071f) depth: 1
50895.869969907 73755: [exit ] foo(40071f) depth: 1
50895.869970227 73755: [exit ] main(400787) depth: 0
""", sort='dump')
def pre(self):
record_cmd = '%s record -d %s %s' % (TestBase.uftrace_cmd, TDIR, 't-' + self.name)
sp.call(record_cmd.split())
return TestBase.TEST_SUCCESS
def runcmd(self):
return '%s dump --no-libcall -d %s' % (TestBase.uftrace_cmd, TDIR)
def post(self, ret):
sp.call(['rm', '-rf', TDIR])
return ret
| gpl-2.0 |
datakortet/django-cms | cms/plugins/teaser/models.py | 1 | 1148 | from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page
class Teaser(CMSPlugin):
"""
A Teaser
"""
title = models.CharField(_("title"), max_length=255)
image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path, blank=True, null=True)
page_link = models.ForeignKey(
Page,
verbose_name=_("page"),
help_text=_("If present image will be clickable"),
blank=True,
null=True,
limit_choices_to={'publisher_is_draft': True}
)
url = models.CharField(_("link"), max_length=255, blank=True, null=True, help_text=_("If present image will be clickable."))
description = models.TextField(_("description"), blank=True, null=True)
@property
def _cache_key(self):
return "%s_id_%d" % (self.__class__.__name__, self.id)
def save(self, *args, **kwargs):
super(Teaser, self).save(*args, **kwargs)
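        # drop any cached rendering of this teaser so changes show up immediately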
cache.delete(self._cache_key)
def __unicode__(self):
return self.title
search_fields = ('description',)
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.0/Lib/dos-8x3/test_win.py | 11 | 5449 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
from _winreg import *
import os, sys
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ,),
(u"Unicode Val", u"A Unicode value", REG_SZ,),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("UnicodeExpand", u"The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Multi-unicode", [u"Lots", u"of", u"unicode", u"values"], REG_MULTI_SZ),
("Multi-mixed", [u"Unicode", u"and", "string", "values"],REG_MULTI_SZ),
("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
]
def WriteTestData(root_key):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
# Create a sub-key
sub_key = CreateKey(key, "sub_key")
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
assert nkeys==1, "Not the correct number of sub keys"
assert nvalues==1, "Not the correct number of values"
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
assert nkeys==0, "Not the correct number of sub keys"
assert nvalues==len(test_data), "Not the correct number of values"
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
raise RuntimeError, "It appears the CloseKey() function does not close the actual key!"
except EnvironmentError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
raise RuntimeError, "It appears the key.Close() function does not close the actual key!"
except EnvironmentError:
pass
def ReadTestData(root_key):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
assert val=="Default value", "Registry didn't give back the correct value"
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
sub_key = OpenKey(key, "sub_key")
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except EnvironmentError:
break
assert data in test_data, "Didn't read back the correct test data"
index = index + 1
assert index==len(test_data), "Didn't read the correct number of items"
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
assert read_val==value_data and read_typ == value_type, \
"Could not directly read the value"
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
assert read_val == "sub_key", "Read subkey value wrong"
try:
EnumKey(key, 1)
assert 0, "Was able to get a second key when I only have one!"
except EnvironmentError:
pass
key.Close()
def DeleteTestData(root_key):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
assert nkeys==0 and nvalues==0, "subkey not empty before delete"
sub_key.Close()
DeleteKey(key, "sub_key")
try:
        # Shouldn't be able to delete it twice!
DeleteKey(key, "sub_key")
assert 0, "Deleting the key twice succeeded"
except EnvironmentError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
assert 0, "Could open the non-existent key"
except WindowsError: # Use this error name this time
pass
def TestAll(root_key):
WriteTestData(root_key)
ReadTestData(root_key)
DeleteTestData(root_key)
# Test on my local machine.
TestAll(HKEY_CURRENT_USER)
print "Local registry tests worked"
try:
remote_name = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
remote_name = None
if remote_name is not None:
try:
remote_key = ConnectRegistry(remote_name, HKEY_CURRENT_USER)
except EnvironmentError, exc:
print "Could not connect to the remote machine -", exc.strerror
remote_key = None
if remote_key is not None:
TestAll(remote_key)
print "Remote registry tests worked"
else:
print "Remote registry calls can be tested using",
print "'test_winreg.py --remote \\\\machine_name'"
| mit |
JshWright/home-assistant | tests/components/switch/test_command_line.py | 25 | 7031 | """The tests for the Command line switch platform."""
import json
import os
import tempfile
import unittest
from homeassistant.setup import setup_component
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
import homeassistant.components.switch.command_line as command_line
from tests.common import get_test_home_assistant
# pylint: disable=invalid-name
class TestCommandSwitch(unittest.TestCase):
"""Test the command switch."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_state_none(self):
"""Test with none state."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'command_on': 'echo 1 > {}'.format(path),
'command_off': 'echo 0 > {}'.format(path),
}
self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_value(self):
"""Test with state value."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'command_state': 'cat {}'.format(path),
'command_on': 'echo 1 > {}'.format(path),
'command_off': 'echo 0 > {}'.format(path),
'value_template': '{{ value=="1" }}'
}
self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_json_value(self):
"""Test with state JSON value."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
oncmd = json.dumps({'status': 'ok'})
offcmd = json.dumps({'status': 'nope'})
test_switch = {
'command_state': 'cat {}'.format(path),
'command_on': 'echo \'{}\' > {}'.format(oncmd, path),
'command_off': 'echo \'{}\' > {}'.format(offcmd, path),
'value_template': '{{ value_json.status=="ok" }}'
}
self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_code(self):
"""Test with state code."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'command_state': 'cat {}'.format(path),
'command_on': 'echo 1 > {}'.format(path),
'command_off': 'echo 0 > {}'.format(path),
}
self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.block_till_done()
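            # the state command is `cat`, which exits 0 regardless of the file's
            # contents, so the switch still reports ON after the off command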
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
def test_assumed_state_should_be_true_if_command_state_is_none(self):
"""Test with state value."""
# args: hass, device_name, friendly_name, command_on, command_off,
# command_state, value_template
init_args = [
self.hass,
"test_device_name",
"Test friendly name!",
"echo 'on command'",
"echo 'off command'",
None,
None,
]
no_state_device = command_line.CommandSwitch(*init_args)
self.assertTrue(no_state_device.assumed_state)
# Set state command
init_args[-2] = 'cat {}'
state_device = command_line.CommandSwitch(*init_args)
self.assertFalse(state_device.assumed_state)
def test_entity_id_set_correctly(self):
"""Test that entity_id is set correctly from object_id."""
init_args = [
self.hass,
"test_device_name",
"Test friendly name!",
"echo 'on command'",
"echo 'off command'",
False,
None,
]
test_switch = command_line.CommandSwitch(*init_args)
self.assertEqual(test_switch.entity_id, 'switch.test_device_name')
self.assertEqual(test_switch.name, 'Test friendly name!')
| apache-2.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/rexec.py | 228 | 20148 | """Restricted execution facilities.
The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
r_import(), which correspond roughly to the built-in operations
exec, eval(), execfile() and import, but executing the code in an
environment that only exposes those built-in operations that are
deemed safe. To this end, a modest collection of 'fake' modules is
created which mimics the standard modules by the same names. It is a
policy decision which built-in modules and operations are made
available; this module provides a reasonable default, but derived
classes can change the policies e.g. by overriding or extending class
variables like ok_builtin_modules or methods like make_sys().
XXX To do:
- r_open should allow writing tmp dir
- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
"""
from warnings import warnpy3k
warnpy3k("the rexec module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import sys
import __builtin__
import os
import ihooks
import imp
__all__ = ["RExec"]
class FileBase:
ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',
'__iter__')
class FileWrapper(FileBase):
# XXX This is just like a Bastion -- should use that!
def __init__(self, f):
for m in self.ok_file_methods:
if not hasattr(self, m) and hasattr(f, m):
setattr(self, m, getattr(f, m))
def close(self):
self.flush()
TEMPLATE = """
def %s(self, *args):
return getattr(self.mod, self.name).%s(*args)
"""
class FileDelegate(FileBase):
def __init__(self, mod, name):
self.mod = mod
self.name = name
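    # generate forwarding methods at class-definition time; each delegates the
    # call to the file object currently bound at getattr(self.mod, self.name)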
for m in FileBase.ok_file_methods + ('close',):
exec TEMPLATE % (m, m)
class RHooks(ihooks.Hooks):
def __init__(self, *args):
# Hacks to support both old and new interfaces:
# old interface was RHooks(rexec[, verbose])
# new interface is RHooks([verbose])
verbose = 0
rexec = None
if args and type(args[-1]) == type(0):
verbose = args[-1]
args = args[:-1]
if args and hasattr(args[0], '__class__'):
rexec = args[0]
args = args[1:]
if args:
raise TypeError, "too many arguments"
ihooks.Hooks.__init__(self, verbose)
self.rexec = rexec
def set_rexec(self, rexec):
# Called by RExec instance to complete initialization
self.rexec = rexec
def get_suffixes(self):
return self.rexec.get_suffixes()
def is_builtin(self, name):
return self.rexec.is_builtin(name)
def init_builtin(self, name):
m = __import__(name)
return self.rexec.copy_except(m, ())
def init_frozen(self, name): raise SystemError, "don't use this"
def load_source(self, *args): raise SystemError, "don't use this"
def load_compiled(self, *args): raise SystemError, "don't use this"
def load_package(self, *args): raise SystemError, "don't use this"
def load_dynamic(self, name, filename, file):
return self.rexec.load_dynamic(name, filename, file)
def add_module(self, name):
return self.rexec.add_module(name)
def modules_dict(self):
return self.rexec.modules
def default_path(self):
return self.rexec.modules['sys'].path
# XXX Backwards compatibility
RModuleLoader = ihooks.FancyModuleLoader
RModuleImporter = ihooks.ModuleImporter
class RExec(ihooks._Verbose):
"""Basic restricted execution framework.
Code executed in this restricted environment will only have access to
modules and functions that are deemed safe; you can subclass RExec to
add or remove capabilities as desired.
The RExec class can prevent code from performing unsafe operations like
reading or writing disk files, or using TCP/IP sockets. However, it does
not protect against code using extremely large amounts of memory or
processor time.
"""
ok_path = tuple(sys.path) # That's a policy decision
ok_builtin_modules = ('audioop', 'array', 'binascii',
'cmath', 'errno', 'imageop',
'marshal', 'math', 'md5', 'operator',
'parser', 'select',
'sha', '_sre', 'strop', 'struct', 'time',
'_weakref')
ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
'stat', 'times', 'uname', 'getpid', 'getppid',
'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')
ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
'getrefcount', 'hexversion', 'maxint', 'maxunicode',
'platform', 'ps1', 'ps2', 'version', 'version_info')
nok_builtin_names = ('open', 'file', 'reload', '__import__')
ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)
def __init__(self, hooks = None, verbose = 0):
"""Returns an instance of the RExec class.
The hooks parameter is an instance of the RHooks class or a subclass
of it. If it is omitted or None, the default RHooks class is
instantiated.
Whenever the RExec module searches for a module (even a built-in one)
or reads a module's code, it doesn't actually go out to the file
system itself. Rather, it calls methods of an RHooks instance that
was passed to or created by its constructor. (Actually, the RExec
object doesn't make these calls --- they are made by a module loader
object that's part of the RExec object. This allows another level of
flexibility, which can be useful when changing the mechanics of
import within the restricted environment.)
By providing an alternate RHooks object, we can control the file
system accesses made to import a module, without changing the
actual algorithm that controls the order in which those accesses are
made. For instance, we could substitute an RHooks object that
passes all filesystem requests to a file server elsewhere, via some
RPC mechanism such as ILU. Grail's applet loader uses this to support
importing applets from a URL for a directory.
If the verbose parameter is true, additional debugging output may be
sent to standard output.
"""
raise RuntimeError, "This code is not secure in Python 2.2 and later"
ihooks._Verbose.__init__(self, verbose)
# XXX There's a circular reference here:
self.hooks = hooks or RHooks(verbose)
self.hooks.set_rexec(self)
self.modules = {}
self.ok_dynamic_modules = self.ok_builtin_modules
list = []
for mname in self.ok_builtin_modules:
if mname in sys.builtin_module_names:
list.append(mname)
self.ok_builtin_modules = tuple(list)
self.set_trusted_path()
self.make_builtin()
self.make_initial_modules()
# make_sys must be last because it adds the already created
# modules to its builtin_module_names
self.make_sys()
self.loader = RModuleLoader(self.hooks, verbose)
self.importer = RModuleImporter(self.loader, verbose)
def set_trusted_path(self):
# Set the path from which dynamic modules may be loaded.
# Those dynamic modules must also occur in ok_builtin_modules
self.trusted_path = filter(os.path.isabs, sys.path)
def load_dynamic(self, name, filename, file):
if name not in self.ok_dynamic_modules:
raise ImportError, "untrusted dynamic module: %s" % name
if name in sys.modules:
src = sys.modules[name]
else:
src = imp.load_dynamic(name, filename, file)
dst = self.copy_except(src, [])
return dst
def make_initial_modules(self):
self.make_main()
self.make_osname()
# Helpers for RHooks
def get_suffixes(self):
return [item # (suff, mode, type)
for item in imp.get_suffixes()
if item[2] in self.ok_file_types]
def is_builtin(self, mname):
return mname in self.ok_builtin_modules
# The make_* methods create specific built-in modules
def make_builtin(self):
m = self.copy_except(__builtin__, self.nok_builtin_names)
m.__import__ = self.r_import
m.reload = self.r_reload
m.open = m.file = self.r_open
def make_main(self):
self.add_module('__main__')
def make_osname(self):
osname = os.name
src = __import__(osname)
dst = self.copy_only(src, self.ok_posix_names)
dst.environ = e = {}
for key, value in os.environ.items():
e[key] = value
def make_sys(self):
m = self.copy_only(sys, self.ok_sys_names)
m.modules = self.modules
m.argv = ['RESTRICTED']
m.path = map(None, self.ok_path)
m.exc_info = self.r_exc_info
m = self.modules['sys']
l = self.modules.keys() + list(self.ok_builtin_modules)
l.sort()
m.builtin_module_names = tuple(l)
# The copy_* methods copy existing modules with some changes
def copy_except(self, src, exceptions):
dst = self.copy_none(src)
for name in dir(src):
setattr(dst, name, getattr(src, name))
for name in exceptions:
try:
delattr(dst, name)
except AttributeError:
pass
return dst
def copy_only(self, src, names):
dst = self.copy_none(src)
for name in names:
try:
value = getattr(src, name)
except AttributeError:
continue
setattr(dst, name, value)
return dst
def copy_none(self, src):
m = self.add_module(src.__name__)
m.__doc__ = src.__doc__
return m
# Add a module -- return an existing module or create one
def add_module(self, mname):
m = self.modules.get(mname)
if m is None:
self.modules[mname] = m = self.hooks.new_module(mname)
m.__builtins__ = self.modules['__builtin__']
return m
# The r* methods are public interfaces
def r_exec(self, code):
"""Execute code within a restricted environment.
The code parameter must either be a string containing one or more
lines of Python code, or a compiled code object, which will be
executed in the restricted environment's __main__ module.
"""
m = self.add_module('__main__')
exec code in m.__dict__
def r_eval(self, code):
"""Evaluate code within a restricted environment.
The code parameter must either be a string containing a Python
expression, or a compiled code object, which will be evaluated in
the restricted environment's __main__ module. The value of the
expression or code object will be returned.
"""
m = self.add_module('__main__')
return eval(code, m.__dict__)
def r_execfile(self, file):
"""Execute the Python code in the file in the restricted
environment's __main__ module.
"""
m = self.add_module('__main__')
execfile(file, m.__dict__)
def r_import(self, mname, globals={}, locals={}, fromlist=[]):
"""Import a module, raising an ImportError exception if the module
is considered unsafe.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.import_module(mname, globals, locals, fromlist)
def r_reload(self, m):
"""Reload the module object, re-parsing and re-initializing it.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.reload(m)
def r_unload(self, m):
"""Unload the module.
Removes it from the restricted environment's sys.modules dictionary.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
return self.importer.unload(m)
# The s_* methods are similar but also swap std{in,out,err}
def make_delegate_files(self):
s = self.modules['sys']
self.delegate_stdin = FileDelegate(s, 'stdin')
self.delegate_stdout = FileDelegate(s, 'stdout')
self.delegate_stderr = FileDelegate(s, 'stderr')
self.restricted_stdin = FileWrapper(sys.stdin)
self.restricted_stdout = FileWrapper(sys.stdout)
self.restricted_stderr = FileWrapper(sys.stderr)
def set_files(self):
if not hasattr(self, 'save_stdin'):
self.save_files()
if not hasattr(self, 'delegate_stdin'):
self.make_delegate_files()
s = self.modules['sys']
s.stdin = self.restricted_stdin
s.stdout = self.restricted_stdout
s.stderr = self.restricted_stderr
sys.stdin = self.delegate_stdin
sys.stdout = self.delegate_stdout
sys.stderr = self.delegate_stderr
def reset_files(self):
self.restore_files()
s = self.modules['sys']
self.restricted_stdin = s.stdin
self.restricted_stdout = s.stdout
self.restricted_stderr = s.stderr
def save_files(self):
self.save_stdin = sys.stdin
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
def restore_files(self):
sys.stdin = self.save_stdin
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
def s_apply(self, func, args=(), kw={}):
self.save_files()
try:
self.set_files()
r = func(*args, **kw)
finally:
self.restore_files()
return r
def s_exec(self, *args):
"""Execute code within a restricted environment.
Similar to the r_exec() method, but the code will be granted access
to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
The code parameter must either be a string containing one or more
lines of Python code, or a compiled code object, which will be
executed in the restricted environment's __main__ module.
"""
return self.s_apply(self.r_exec, args)
def s_eval(self, *args):
"""Evaluate code within a restricted environment.
Similar to the r_eval() method, but the code will be granted access
to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
The code parameter must either be a string containing a Python
expression, or a compiled code object, which will be evaluated in
the restricted environment's __main__ module. The value of the
expression or code object will be returned.
"""
return self.s_apply(self.r_eval, args)
def s_execfile(self, *args):
"""Execute the Python code in the file in the restricted
environment's __main__ module.
Similar to the r_execfile() method, but the code will be granted
access to restricted versions of the standard I/O streams sys.stdin,
sys.stderr, and sys.stdout.
"""
return self.s_apply(self.r_execfile, args)
def s_import(self, *args):
"""Import a module, raising an ImportError exception if the module
is considered unsafe.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_import() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_import, args)
def s_reload(self, *args):
"""Reload the module object, re-parsing and re-initializing it.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_reload() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_reload, args)
def s_unload(self, *args):
"""Unload the module.
Removes it from the restricted environment's sys.modules dictionary.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
Similar to the r_unload() method, but has access to restricted
versions of the standard I/O streams sys.stdin, sys.stderr, and
sys.stdout.
"""
return self.s_apply(self.r_unload, args)
# Restricted open(...)
def r_open(self, file, mode='r', buf=-1):
"""Method called when open() is called in the restricted environment.
The arguments are identical to those of the open() function, and a
file object (or a class instance compatible with file objects)
should be returned. RExec's default behaviour is allow opening
any file for reading, but forbidding any attempt to write a file.
This method is implicitly called by code executing in the
restricted environment. Overriding this method in a subclass is
used to change the policies enforced by a restricted environment.
"""
mode = str(mode)
if mode not in ('r', 'rb'):
raise IOError, "can't open files for writing in restricted mode"
return open(file, mode, buf)
# Restricted version of sys.exc_info()
def r_exc_info(self):
ty, va, tr = sys.exc_info()
tr = None
return ty, va, tr
def test():
import getopt, traceback
opts, args = getopt.getopt(sys.argv[1:], 'vt:')
verbose = 0
trusted = []
for o, a in opts:
if o == '-v':
verbose = verbose+1
if o == '-t':
trusted.append(a)
r = RExec(verbose=verbose)
if trusted:
r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
if args:
r.modules['sys'].argv = args
r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
else:
r.modules['sys'].path.insert(0, "")
fp = sys.stdin
if args and args[0] != '-':
try:
fp = open(args[0])
except IOError, msg:
print "%s: can't open file %r" % (sys.argv[0], args[0])
return 1
if fp.isatty():
try:
import readline
except ImportError:
pass
import code
class RestrictedConsole(code.InteractiveConsole):
def runcode(self, co):
self.locals['__builtins__'] = r.modules['__builtin__']
r.s_apply(code.InteractiveConsole.runcode, (self, co))
try:
RestrictedConsole(r.modules['__main__'].__dict__).interact()
except SystemExit, n:
return n
else:
text = fp.read()
fp.close()
c = compile(text, fp.name, 'exec')
try:
r.s_exec(c)
except SystemExit, n:
return n
except:
traceback.print_exc()
return 1
if __name__ == '__main__':
sys.exit(test())
| mit |
c0hen/django-venv | lib/python3.4/site-packages/django/contrib/admin/helpers.py | 27 | 15048 | from __future__ import unicode_literals
import json
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.utils import (
display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(
label='',
required=False,
initial=0,
widget=forms.HiddenInput({'class': 'select-across'}),
)
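    # per-row selection checkbox; the check_test lambda renders it unchecked by default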
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
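    # Iterating an AdminForm yields one Fieldset per entry in `fieldsets`;
    # the admin change-form template renders these directly.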
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra,
]
return forms.Media(js=['admin/js/%s' % url for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(
field in self.form.fields and self.form.fields[field].widget.is_hidden
for field in self.fields
)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(
'\n'.join(
self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields
).strip('\n')
)
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
self.is_readonly = False
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = {'class': ' '.join(classes)} if classes else {}
# checkboxes should not have a label suffix as the checkbox appears
# to the left of the label.
return self.field.label_tag(
contents=mark_safe(contents), attrs=attrs,
label_suffix='' if self.is_checkbox else None,
)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ if field.__name__ != '<lambda>' else ''
else:
class_name = field
if form._meta.labels and class_name in form._meta.labels:
label = form._meta.labels[class_name]
else:
label = label_for_field(field, form._meta.model, model_admin)
if form._meta.help_texts and class_name in form._meta.help_texts:
help_text = form._meta.help_texts[class_name]
else:
help_text = help_text_for_field(class_name, form._meta.model)
self.field = {
'name': class_name,
'label': label,
'help_text': help_text,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
self.empty_value_display = model_admin.get_empty_value_display()
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{}>{}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = self.empty_value_display
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
if hasattr(value, "__html__"):
result_repr = value
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
warnings.warn(
"Deprecated allow_tags attribute used on %s. "
"Use django.utils.html.format_html(), format_html_join(), "
"or django.utils.safestring.mark_safe() instead." % attr,
RemovedInDjango20Warning
)
result_repr = mark_safe(value)
else:
result_repr = linebreaksbr(result_repr)
else:
if isinstance(f.remote_field, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f, self.empty_value_display)
result_repr = linebreaksbr(result_repr)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
self.classes = ' '.join(inline.classes) if inline.classes else ''
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(
self.formset, form, self.fieldsets, self.prepopulated_fields,
original, self.readonly_fields, model_admin=self.opts,
view_on_site_url=view_on_site_url,
)
for form in self.formset.extra_forms:
yield InlineAdminForm(
self.formset, form, self.fieldsets, self.prepopulated_fields,
None, self.readonly_fields, model_admin=self.opts,
)
yield InlineAdminForm(
self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts,
)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if field_name in self.readonly_fields:
yield {
'label': label_for_field(field_name, self.opts.model, self.opts),
'widget': {'is_hidden': False},
'required': False,
'help_text': help_text_for_field(field_name, self.opts.model),
}
else:
form_field = self.formset.empty_form.fields[field_name]
label = form_field.label
if label is None:
label = label_for_field(field_name, self.opts.model, self.opts)
yield {
'label': label,
'widget': form_field.widget,
'required': form_field.required,
'help_text': form_field.help_text,
}
def inline_formset_data(self):
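        # The returned JSON is consumed by the admin's inline-formset
        # JavaScript, which uses the addText/deleteText values below for the
        # add/remove controls.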
verbose_name = self.opts.verbose_name
return json.dumps({
'name': '#%s' % self.formset.prefix,
'options': {
'prefix': self.formset.prefix,
'addText': ugettext('Add another %(verbose_name)s') % {
'verbose_name': capfirst(verbose_name),
},
'deleteText': ugettext('Remove'),
}
})
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None, view_on_site_url=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
self.show_url = original and view_on_site_url is not None
self.absolute_url = view_on_site_url
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(
self.formset, self.form, name, self.readonly_fields,
model_admin=self.model_admin, **options
)
def needs_explicit_pk_field(self):
# Auto fields are editable (oddly), so need to check for auto or non-editable pk
if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
return True
# Also search any parents for an auto field. (The pk info is propagated to child
# models so that does not need to be checked in parents.)
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
super(AdminErrorList, self).__init__()
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
| gpl-3.0 |
hoangt/gem5v | src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py | 18 | 3743 | # Copyright (c) 2008 Princeton University
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BasicLink import BasicIntLink, BasicExtLink
class NetworkLink_d(SimObject):
type = 'NetworkLink_d'
link_id = Param.Int(Parent.link_id, "link id")
link_latency = Param.Int(Parent.latency, "link latency")
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
"number of virtual networks")
channel_width = Param.Int(Parent.bandwidth_factor,
"channel width == bw factor")
class CreditLink_d(NetworkLink_d):
type = 'CreditLink_d'
# Interior fixed pipeline links between routers
class GarnetIntLink_d(BasicIntLink):
type = 'GarnetIntLink_d'
    # The detailed fixed pipeline bi-directional link includes two main
    # forward links and two backward flow-control links, one per direction
nls = []
# In uni-directional link
nls.append(NetworkLink_d());
# Out uni-directional link
nls.append(NetworkLink_d());
network_links = VectorParam.NetworkLink_d(nls, "forward links")
cls = []
# In uni-directional link
cls.append(CreditLink_d());
# Out uni-directional link
cls.append(CreditLink_d());
credit_links = VectorParam.CreditLink_d(cls, "backward flow-control links")
# Exterior fixed pipeline links between a router and a controller
class GarnetExtLink_d(BasicExtLink):
type = 'GarnetExtLink_d'
    # The detailed fixed pipeline bi-directional link includes two main
    # forward links and two backward flow-control links, one per direction
nls = []
# In uni-directional link
nls.append(NetworkLink_d());
# Out uni-directional link
nls.append(NetworkLink_d());
network_links = VectorParam.NetworkLink_d(nls, "forward links")
cls = []
# In uni-directional link
cls.append(CreditLink_d());
# Out uni-directional link
cls.append(CreditLink_d());
credit_links = VectorParam.CreditLink_d(cls, "backward flow-control links")
| bsd-3-clause |
cryptickp/troposphere | examples/CloudFront_S3.py | 22 | 1622 | # Converted from CloudFront_S3.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import GetAtt, Join, Output
from troposphere import Parameter, Ref, Template
from troposphere.cloudfront import Distribution, DistributionConfig
from troposphere.cloudfront import Origin, DefaultCacheBehavior
from troposphere.cloudfront import ForwardedValues
t = Template()
t.add_description(
"AWS CloudFormation Sample Template CloudFront_S3: Sample template "
"showing how to create an Amazon CloudFront distribution using an "
"S3 origin. "
"**WARNING** This template creates a CloudFront distribution. "
"You will be billed for the AWS resources used if you create "
"a stack from this template.")
s3dnsname = t.add_parameter(Parameter(
"S3DNSNAme",
Description="The DNS name of an existing S3 bucket to use as the "
"Cloudfront distribution origin",
Type="String",
))
myDistribution = t.add_resource(Distribution(
"myDistribution",
DistributionConfig=DistributionConfig(
Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))],
DefaultCacheBehavior=DefaultCacheBehavior(
TargetOriginId="Origin 1",
ForwardedValues=ForwardedValues(
QueryString=False
),
ViewerProtocolPolicy="allow-all"),
Enabled=True
)
))
t.add_output([
Output("DistributionId", Value=Ref(myDistribution)),
Output(
"DistributionName",
Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")])),
])
print(t.to_json())
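# The rendered template can then be deployed with the AWS CLI, for example
# (stack and file names here are illustrative):
#   aws cloudformation create-stack --stack-name my-stack \
#       --template-body file://cloudfront_s3.json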
| bsd-2-clause |
nelmiux/CarnotKE | jyhton/Lib/test/clamp.py | 12 | 2254 | import java
import os
import os.path
from java.lang.reflect import Modifier
from org.python.util import CodegenUtils
from org.python.compiler import CustomMaker, ProxyCodeHelpers
__all__ = ["PackageProxy", "SerializableProxies"]
class SerializableProxies(CustomMaker):
# NOTE: SerializableProxies is itself a java proxy, but it's not a custom one!
serialized_path = None
def doConstants(self):
self.classfile.addField("serialVersionUID",
CodegenUtils.ci(java.lang.Long.TYPE), Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL)
code = self.classfile.addMethod("<clinit>", ProxyCodeHelpers.makeSig("V"), Modifier.STATIC)
code.visitLdcInsn(java.lang.Long(1))
code.putstatic(self.classfile.name, "serialVersionUID", CodegenUtils.ci(java.lang.Long.TYPE))
code.return_()
def saveBytes(self, bytes):
if self.serialized_path:
path = os.path.join(self.serialized_path, os.path.join(*self.myClass.split(".")) + ".class")
parent = os.path.dirname(path)
try:
os.makedirs(parent)
except OSError:
pass # Directory exists
with open(path, "wb") as f:
f.write(bytes.toByteArray())
def makeClass(self):
try:
# If already defined on CLASSPATH, simply return this class
cls = java.lang.Class.forName(self.myClass)
print "Class defined on CLASSPATH", cls
except:
# Otherwise build it
cls = CustomMaker.makeClass(self)
return cls
class PackageProxy(object):
def __init__(self, package):
self.package = package
def __call__(self, superclass, interfaces, className, pythonModuleName, fullProxyName, mapping):
"""Constructs a usable proxy name that does not depend on ordering"""
if "." in pythonModuleName:
            # get around the fact that this will be called differently from regrtest, as test.module instead of module
pythonModuleName = pythonModuleName.split(".")[-1]
return SerializableProxies(superclass, interfaces, className, pythonModuleName, self.package + "." + pythonModuleName + "." + className, mapping)
| apache-2.0 |
minifirocks/nifi-minifi-cpp | thirdparty/rocksdb/buckifier/targets_cfg.py | 6 | 3002 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
rocksdb_target_header = """
import os
TARGETS_PATH = os.path.dirname(__file__)
REPO_PATH = "rocksdb/src/"
BUCK_BINS = "buck-out/gen/" + REPO_PATH
TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
rocksdb_compiler_flags = [
"-fno-builtin-memcmp",
"-DROCKSDB_PLATFORM_POSIX",
"-DROCKSDB_LIB_IO_POSIX",
"-DROCKSDB_FALLOCATE_PRESENT",
"-DROCKSDB_MALLOC_USABLE_SIZE",
"-DROCKSDB_RANGESYNC_PRESENT",
"-DROCKSDB_SCHED_GETCPU_PRESENT",
"-DROCKSDB_SUPPORT_THREAD_LOCAL",
"-DOS_LINUX",
# Flags to enable libs we include
"-DSNAPPY",
"-DZLIB",
"-DBZIP2",
"-DLZ4",
"-DZSTD",
"-DGFLAGS=gflags",
"-DNUMA",
"-DTBB",
# Needed to compile in fbcode
"-Wno-expansion-to-defined",
]
rocksdb_external_deps = [
('bzip2', None, 'bz2'),
('snappy', None, "snappy"),
('zlib', None, 'z'),
('gflags', None, 'gflags'),
('lz4', None, 'lz4'),
('zstd', None),
('tbb', None),
("numa", None, "numa"),
("googletest", None, "gtest"),
]
rocksdb_preprocessor_flags = [
# Directories with files for #include
"-I" + REPO_PATH + "include/",
"-I" + REPO_PATH,
]
rocksdb_arch_preprocessor_flags = {
"x86_64": ["-DHAVE_SSE42"],
}
"""
library_template = """
cpp_library(
name = "%s",
headers = %s,
srcs = [%s],
deps = [%s],
preprocessor_flags = rocksdb_preprocessor_flags,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
external_deps = rocksdb_external_deps,
)
"""
binary_template = """
cpp_binary(
name = "%s",
srcs = [%s],
deps = [%s],
preprocessor_flags = rocksdb_preprocessor_flags,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
external_deps = rocksdb_external_deps,
)
"""
unittests_template = """
# [test_name, test_src, test_type]
ROCKS_TESTS = %s
# Generate a test rule for each entry in ROCKS_TESTS
for test_cfg in ROCKS_TESTS:
test_name = test_cfg[0]
test_cc = test_cfg[1]
ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
test_bin = test_name + "_bin"
cpp_binary (
name = test_bin,
srcs = [test_cc],
deps = [":rocksdb_test_lib"],
preprocessor_flags = rocksdb_preprocessor_flags,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
external_deps = rocksdb_external_deps,
)
custom_unittest(
name = test_name,
type = ttype,
deps = [":" + test_bin],
command = [TEST_RUNNER, BUCK_BINS + test_bin]
)
custom_unittest(
name = "make_rocksdbjavastatic",
type = "simple",
command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
)
custom_unittest(
name = "make_rocksdb_lite_release",
type = "simple",
command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
)
"""
| apache-2.0 |
YufeiZhang/Principles-of-Programming-Python-3 | Lectures/Lecture_6/k_means_clustering.py | 1 | 10239 | # Written by Eric Martin for COMP9021
import tkinter as tk
import tkinter.messagebox
class KMeansClustering(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.title('k-means clustering')
menubar = tk.Menu()
help_menu = tk.Menu(menubar)
menubar.add_cascade(label = 'k-means Clustering Help', menu = help_menu)
help_menu.add_command(label = 'Principle', command = self.principle_help)
help_menu.add_command(label = 'Clearing', command = self.clearing_help)
help_menu.add_command(label = 'Creating points and initial centroids',
command = self.creating_points_and_initial_centroids_help)
self.config(menu = menubar)
self.space = Space()
buttons = tk.Frame(bd = 20)
self.configure_space_or_cluster_button = tk.Button(buttons, text = 'Cluster', width = 5,
command = self.configure_space_or_cluster)
self.configure_space_or_cluster_button.pack(padx = 30, side = tk.LEFT)
self.clear_or_iterate_button = tk.Button(buttons, text = 'Clear', width = 5,
command = self.clear_or_iterate)
self.clear_or_iterate_button.pack(padx = 30)
buttons.pack()
self.space.pack()
self.clustering = False
def principle_help(self):
tkinter.messagebox.showinfo('Principle',
'k, a positive integer which here can only be at most equal to 6, represents '
'the number of clusters to be created.\n\n'
'After the user has created a number of (round) points, the button displaying "Cluster" '
'can be clicked, and then the user can create k (square) points, or "centroids", '
'displayed in different colors.\n'
'Clicking the button displaying "Iterate" gives each point the colour of the closest '
'centroid, making that point a member of the cluster associated with that colour.\n\n'
'The centre of gravity of each cluster then becomes the new centroid. '
'The same computation can be done again by clicking the button displaying "Iterate", '
'until the clusters do not change any more, in which case the button labels change and '
'the user is in a position to run another experiment.\n\n'
'The user can also click the button displaying "Stop" to get back to that position, and '
'change her mind by clicking again on the button displaying "Cluster".')
def clearing_help(self):
tkinter.messagebox.showinfo('Clearing',
'In case centroids are displayed, clicking the "Clear" button deletes the centroids, and '
'if the points are coloured because they have been clustered, then they lose their '
'colour.\n\n'
'In case no centroid is displayed, possibly because the "Clear" button has just been '
'clicked, then clicking the "Clear" button deletes all points.')
def creating_points_and_initial_centroids_help(self):
tkinter.messagebox.showinfo('Creating points and initial centroids',
'Points and initial centroids are created simply by clicking in the grey area.\n'
'Clicking on an existing point or initial centroid deletes it.\n'
'No point or centroid is created when it is too close to an existing point or centroid, '
'respectively.\n\n'
'There can be at most 6 centroids. Trying to create more will have no effect.')
def configure_space_or_cluster(self):
if self.clustering:
self.configure_space_or_cluster_button.config(text = 'Cluster')
self.clear_or_iterate_button.config(text = 'Clear')
self.clustering = False
self.space.clustering = False
self.space.nb_of_clusters = 0
else:
self.configure_space_or_cluster_button.config(text = 'Stop')
self.clear_or_iterate_button.config(text = 'Iterate')
self.clustering = True
self.space.clustering = True
def clear_or_iterate(self):
if self.clustering:
if not self.space.iterate():
self.configure_space_or_cluster()
else:
self.space.clear()
class Space(tk.Frame):
space_dim = 600
space_colour = '#F5F5F5'
point_colour = '#808080'
def __init__(self):
tk.Frame.__init__(self, padx = 20, pady = 20)
self.space = tk.Canvas(self, width = self.space_dim, height = self.space_dim, bg = self.space_colour)
self.space.bind('<1>', self.act_on_click)
self.space.pack()
self.points = {}
self.centroids = {}
self.colours = 'red', 'green', 'blue', 'cyan', 'black', 'magenta'
self.available_colours = list(self.colours)
self.clustering = False
def clear(self):
if self.centroids:
for centroid_coordinates in self.centroids:
self.space.itemconfig(self.centroids[centroid_coordinates].drawn_point, fill = '',
outline = '')
self.centroids.clear()
for point_coordinates in self.points:
self.points[point_coordinates].colour = self.point_colour
self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = self.point_colour,
outline = self.point_colour)
self.available_colours = list(self.colours)
else:
for point_coordinates in self.points:
self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = '',
outline = '')
self.points.clear()
def act_on_click(self, event):
x = self.space.canvasx(event.x)
        y = self.space.canvasy(event.y)
if x < 10 or x > self.space_dim - 5 or y < 10 or y > self.space_dim - 5:
return
coordinates = x, y
if self.clustering:
if (self.request_point_otherwise_delete_or_ignore(coordinates, self.centroids, 8) and
self.available_colours):
colour = self.available_colours.pop()
self.centroids[coordinates] = Point(self.draw_centroid(x, y, colour), colour)
else:
if self.request_point_otherwise_delete_or_ignore(coordinates, self.points, 25):
self.points[coordinates] = Point(self.space.create_oval(x - 2, y - 2, x + 2, y + 2,
fill = self.point_colour,
outline = self.point_colour),
self.point_colour)
def request_point_otherwise_delete_or_ignore(self, coordinates, points, size):
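        # 'size' is compared against squared distances: a click within
        # sqrt(size) pixels of an existing point deletes that point, and a
        # new point closer than 2 * sqrt(size) pixels to an existing one is
        # ignored.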
for point_coordinates in points:
if self.square_of_distance(coordinates, point_coordinates) < size:
self.space.itemconfig(points[point_coordinates].drawn_point, fill = '', outline = '')
colour = points[point_coordinates].colour
if colour != self.point_colour:
self.available_colours.append(colour)
del points[point_coordinates]
return False
if any(self.square_of_distance(coordinates, point_coordinates) < 4 * size
for point_coordinates in points):
return False
return True
def square_of_distance(self, coordinates_1, coordinates_2):
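        # The squared distance suffices for nearest-point comparisons, so
        # the sqrt call is skipped.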
return (coordinates_1[0] - coordinates_2[0]) ** 2 + (coordinates_1[1] - coordinates_2[1]) ** 2
def iterate(self):
clusters = {centroid_coordinates: [] for centroid_coordinates in self.centroids}
if not clusters:
return
different_clustering = False
for point_coordinates in self.points:
min_square_of_distance = float('inf')
for centroid_coordinates in self.centroids:
square_of_distance = self.square_of_distance(point_coordinates, centroid_coordinates)
if square_of_distance < min_square_of_distance:
min_square_of_distance = square_of_distance
closest_centroid_coordinates = centroid_coordinates
colour = self.centroids[closest_centroid_coordinates].colour
if self.points[point_coordinates].colour != colour:
self.points[point_coordinates].colour = colour
self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = colour,
outline = colour)
different_clustering = True
clusters[closest_centroid_coordinates].append(point_coordinates)
for centroid_coordinates in clusters:
nb_of_points = len(clusters[centroid_coordinates])
if nb_of_points:
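                # zip(*...) transposes the (x, y) pairs into a tuple of xs
                # and a tuple of ys, so map(sum, ...) yields the coordinate
                # sums for the new centroid (the cluster's centre of gravity).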
x, y = tuple(map(sum, zip(*clusters[centroid_coordinates])))
clusters[centroid_coordinates] = x / nb_of_points, y / nb_of_points
for centroid_coordinates in self.centroids:
self.space.itemconfig(self.centroids[centroid_coordinates].drawn_point, fill = '',
outline = '')
updated_centroids = {}
for centroid_coordinates in clusters:
if clusters[centroid_coordinates]:
colour = self.centroids[centroid_coordinates].colour
x, y = clusters[centroid_coordinates]
updated_centroids[(x, y)] = Point(self.draw_centroid(x, y, colour), colour)
self.centroids = updated_centroids
return different_clustering
def draw_centroid(self, x, y, colour):
return self.space.create_rectangle(x - 1, y - 1, x + 1, y + 1, fill = colour, outline = colour)
class Point:
def __init__(self, drawn_point, colour):
self.drawn_point = drawn_point
self.colour = colour
if __name__ == '__main__':
KMeansClustering().mainloop()
| gpl-3.0 |
aerval/blast_comparison | main.py | 1 | 16353 | #!/bin/env python
##############################################
# CompareBLASTs #
# A tool to compare the found hits from two #
# BLAST searches with the same search query. #
# #
# by Philipp B. Rentzsch #
# BCCDC Vancouver, BC #
# October 2014 - January 2015 #
# License: MIT #
##############################################
from __future__ import print_function
from time import strptime # convert string into time object
import optparse # commandline parsing
from blast_hit import * # BlastHit.py file
import string # for valid letters in filename
def load_blasthits(file):
'''
Read a tabular BLAST file into a list of BlastHits.
file = (string) filename of tabular blast result file
'''
blastfile = open(file).readlines()
hits = []
    # We cannot extract every line from the tabular file into a single hit,
    # since some correspond to multiple such hits.
for hit in blastfile:
h = hit.split('\t')
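        # Column 2 (h[1]) holds the subject id, while column 13 (h[12]) holds
        # the full, possibly semicolon-separated, list of subject ids.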
if h[1] == h[12]:
hits.append(BlastHit(hit))
else:
            # When multiple gene ids contribute to the same alignment, they
            # can be summarized into one hit. In the following we split these
            # up because we want to check each hit separately.
subhits = h[12].split(';')
for sub in subhits:
h[1] = sub
hits.append(BlastHit('\t'.join(h)))
return hits
class CompareBLASTs(object):
def __init__(self, old_hits, new_hits, email, name):
'''
Initialize the comparison object.
old_hits = List of BlastHits from the older BLAST Search
new_hits = List of BlastHits from the newer, second BLAST Search
        email = Your email address, needed for use of NCBI's Entrez to
        prevent misuse of their service
        name = Query name that led to the BlastHits, used to identify them
        later
'''
self.input_old_hits = old_hits
self.input_new_hits = new_hits
self.email = email
self.name = name
def compare(self):
'''
Compares the two lists of BlastHits for more or less similar elements
and extracts those elements form both lists that have no companion in
each other.
'''
# Compare for exact (or similar) hits.
self.new_hits, self.old_hits = compare_blasts(self.input_new_hits,
self.input_old_hits)
        # Retrieve basic information on the corresponding genes for all old hits.
self.oldGeneIDs = get_idlist(self.old_hits['all'], self.email)
        # Check, for all the old hits without a copy in the new hit list,
        # what happened to their associated gene (whether it still exists,
        # was updated (=replaced) or deleted (=suppressed)).
oldOnly = {'live': [], 'replaced': [], 'suppressed': []}
        # A bit confusing: live and lost are handled as equivalent here,
        # since a hit that is live (=still existing in the db) but not found
        # in the new BLAST search was 'lost' at some point.
for hit in self.old_hits['unknown']:
for ID in hit.ids:
if ID.db == 'gi':
oldOnly[self.oldGeneIDs[ID.num]['Status']].append(hit)
hit.status = self.oldGeneIDs[ID.num]['Status']
break
self.new_hits['replacement'] = [] # Equivalent to old_hits 'replaced'
self.old_hits['lost'] = oldOnly['live']
self.old_hits['suppressed'] = oldOnly['suppressed']
self.old_hits['replacement'] = []
        # For the old hits with a known replacement tag, check whether a
        # replacing hit can be found within the new hits.
for num, hit in enumerate(oldOnly['replaced']):
for ID in hit.ids:
if ID.db == 'gi':
new_id = self.oldGeneIDs[ID.num]['ReplacedBy']
found = False
for num2, hit2 in enumerate(self.new_hits['unknown']):
if new_id in [ID.num for ID in hit2.ids]:
same, differences = hit.compare_hit(hit2, check_ids=False)
if same:
rep = self.new_hits['unknown'].pop(num2)
rep.status = 'replacement'
self.new_hits['replacement'].append(rep)
self.old_hits['replacement'].append(
oldOnly['replaced'][num])
found = True
break
if not found:
                        # The hit has a replacement, but it was nevertheless
                        # not found in the new BLAST search => lost/live.
self.old_hits['lost'].append(oldOnly['replaced'][num])
oldOnly['replaced'][num].status = 'live'
        # Get the basic info for those hits in the new search that have no
        # known relative in the old search.
self.newGeneIDs = get_idlist(self.new_hits['unknown'], self.email)
        # Estimate the time of the old BLAST search (or of the last database
        # update it used) by looking for the creation date of the youngest
        # entry that matches the old hits.
date_oldsearch = max([strptime(record['CreateDate'], '%Y/%m/%d')
for record in self.oldGeneIDs.values()])
        # Check whether all new hits with no relative in the old search are
        # indeed new (therefore created after the last of the old hits). I
        # never had this case but one can never know ...
self.new_hits['new'] = []
self.new_hits['old'] = []
for hit in self.new_hits['unknown']:
if strptime(self.newGeneIDs[hit.ids[0].num]['CreateDate'],
'%Y/%m/%d') < date_oldsearch:
self.new_hits['old'].append(hit)
hit.status = 'strange'
else:
self.new_hits['new'].append(hit)
hit.status = 'new'
def output_comparison(self, output_types=[lambda x: print(x)], top=0,
long_output=False, adaptive=True):
'''
        Prints (and/or writes to a file) the output of the BLAST comparison.
        output_types = List of output lambdas like 'lambda x: print(x)' and
        'lambda x: output_file.write(''.join([x, '\n']))'
        top = The number of hits (counted from the top score) that are of
        interest for the comparison (0 = all)
        long_output = A longer, more descriptive output
        adaptive = In adaptive mode, only those categories that actually
        appear are displayed; e.g. if there are no new hits in the second
        BLAST, that category is not shown
'''
        # Determine the number of hits (in the interval of interest) that
        # belong to each category.
hits_per_category = {'equal': 0, 'similar': 0, 'live': 0,
'replaced': 0, 'suppressed': 0, 'new': 0,
'strange': 0}
if top == 0: # Count all hits
top_old = len(self.old_hits['all'])
top_new = len(self.new_hits['all'])
else: # Count only the specified fraction of hits
top_old = min(top, len(self.old_hits['all']))
top_new = min(top, len(self.new_hits['all']))
for hit in self.old_hits['all'][:top_old]:
hits_per_category[hit.status] += 1
for hit in self.new_hits['all'][:top_new]:
if hit.status in ['new', 'strange']:
hits_per_category[hit.status] += 1
if long_output:
category_names = {
'equal': 'Found in both BLASTs results:\t%i',
'similar': 'Found in both BLASTs results with slight \
changes:\t%i',
'live': 'Not showing up for unknown reasons in the second \
BLAST (probably low scores):\t%i',
'replaced': 'Replaced/updated before the second BLAST:\t%i',
'suppressed': 'Deleted/suppressed before the second BLAST:\t\
%i',
'new': 'New hits added to the database for the second BLAST:\t\
%i',
'strange': 'Hits that do only appear in the second BLAST \
that should have appeared in the first:\t%i'}
else:
category_names = {
'equal': 'Equal Hits:\t%i',
'similar': 'Changed Hits\t%i',
'live': 'Lost Hits\t%i',
'replaced': 'Replaced Hits:\t%i',
'suppressed': 'Deleted Hits:\t%i',
'new': 'New Hits:\t%i',
'strange': 'New appearing Hits:\t%i'}
# For the different output channels (write to file or print).
for output in output_types:
# Always print the query name as more than one query can be found
# in a single BLAST.
if self.name:
output('Query:\t%s' % self.name)
if long_output:
output('Total hits in old search:\t%i' %
len(self.old_hits['all']))
output('Total hits in new search:\t%i' %
len(self.new_hits['all']))
if top_old != len(self.old_hits['all']) and \
top_new != len(self.new_hits['all']):
output('Among the top %i hits were:' % top)
else:
output('From all hits were:')
for key in ['equal', 'similar', 'live', 'replaced', 'suppressed',
'new', 'strange']:
if not adaptive or hits_per_category[key] > 0:
                    # In (default) adaptive mode, only those hit categories
                    # that appear are displayed (example: if there is no
                    # replaced hit, the replaced-hits line is not displayed).
output(category_names[key] % hits_per_category[key])
# separate from following queries
output('\n')
def export_hit_categories(self, categories, path=''):
'''
Exports the given categories into files (format similar to the input
.blast format with a status column added at the end).
        categories = String of comma-delimited categories (e.g. 'new,
        all_old' to export all new hits and all the hits from the old search)
path = file path to the exported files
'''
categories = categories.split(',')
# Generate valid filenames:
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
name = ''.join(c for c in self.name if c in valid_chars)
for category in categories:
hits = None
if category == 'new':
hits = self.new_hits['new']
if category == 'equal':
hits = self.old_hits['same']
if category == 'similar':
hits = self.old_hits['similar']
if category == 'live':
hits = self.old_hits['lost']
if category == 'replaced':
hits = self.old_hits['replacement']
if category == 'suppressed':
hits = self.old_hits['suppressed']
if category == 'all_old':
hits = self.old_hits['all']
if category == 'all_new':
hits = self.new_hits['all']
if category == 'strange':
hits = self.new_hits['old']
if hits:
with open(path + name + '_' + category + '.blast', 'w+') as f:
                    # The query name and category specify the file name
# (e.g. Query7_all_new.blast).
for hit in hits:
f.write(str(hit) + '\n')
else:
print("Unknown export category %s" % category)
def perform_comparison(opts):
'''
The main function that compares two BLAST files against the same Query
Sequence
opts = parsed OptionsParser
'''
new_hits = {}
old_hits = {}
# Load the hits from the two input files.
new_hits_all = load_blasthits(opts.new_Blast)
old_hits_all = load_blasthits(opts.old_Blast)
    # Sort all hits by their respective query (as one BLAST file can contain
    # multiple queries).
for hit in new_hits_all:
if hit.name in new_hits.keys():
new_hits[hit.name].append(hit)
else:
new_hits[hit.name] = [hit]
for hit in old_hits_all:
if hit.name in old_hits.keys():
old_hits[hit.name].append(hit)
else:
old_hits[hit.name] = [hit]
    # Make sure that both files were run against the same queries.
assert old_hits.keys() == new_hits.keys()
# Define how to output the (general) results (print to console and/or save
# to file).
output_types = []
if opts.verbose:
output_types.append(lambda x: print(x))
if opts.save_output:
output_file = open(opts.output_path + opts.save_output, 'w+')
output_types.append(lambda x: output_file.write(''.join([x, '\n'])))
# Somewhat complicated expression because file.write does not
# automatically add a line end character.
for key in old_hits.keys():
blastComparison = CompareBLASTs(old_hits[key], new_hits[key],
opts.email, key)
blastComparison.compare()
blastComparison.output_comparison(output_types, opts.top,
opts.long_output, opts.adaptive)
# Export specified hit categories to file.
if opts.export:
blastComparison.export_hit_categories(opts.export,
path=opts.output_path)
if opts.save_output:
output_file.close()
if __name__ == '__main__':
# General description of the program
usage = '''
%prog [options]
    You must provide the two tabular BLAST files: old (-o) and new (-n)
'''
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--old', default=None, dest='old_Blast',
help='the older tabular BLAST file (24 columns)')
op.add_option('-n', '--new', default=None, dest='new_Blast',
help='the newer BLAST file')
op.add_option('-t', '--top', type='int', default=0,
help='specify when only the top X (integer value) hits for \
each query are of interest')
op.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=True, help='print everything')
op.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='stay quiet')
op.add_option('-s', '--save', default=None, dest='save_output',
help='file where the output is saved')
op.add_option('-p', '--put', default='', dest='output_path',
help='the path where the saved output and/or exported hit \
files are stored')
op.add_option('-l', '--longOutput', action='store_true',
dest='long_output', default=False,
help='enable long names in the output')
op.add_option('-a', '--adaptive', action='store_true',
dest='adaptive', default=True,
                  help='only display those hit classes that have elements')
op.add_option('-A', '--notAdaptive', action='store_false',
dest='adaptive', help='display all elements')
op.add_option('-e', '--email', default='[email protected]',
help='email address of the user to send him/her notice of \
excess use')
op.add_option('-x', '--export', default=None,
help='export specified hit categories (Example: \
"-x new,old_all,suppressed", Categories: "equal, similar, \
live, replaced, suppressed, new, strange, all_old and \
all_new)"')
opts, args = op.parse_args()
assert opts.old_Blast and opts.new_Blast
# Executes the analysing program
perform_comparison(opts)
| mit |
makinacorpus/pygal | pygal/test/test_interpolate.py | 4 | 3200 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
from pygal.test import make_data
def test_cubic(Chart, datas):
chart = Chart(interpolate='cubic')
chart = make_data(chart, datas)
assert chart.render()
def test_cubic_prec(Chart, datas):
chart = Chart(interpolate='cubic', interpolation_precision=200)
chart = make_data(chart, datas)
chart_low = Chart(interpolate='cubic', interpolation_precision=5)
    chart_low = make_data(chart_low, datas)
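    # A higher interpolation precision yields more interpolated points, so
    # the high-precision render should not be smaller than the low one.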
assert len(chart.render()) >= len(chart_low.render())
def test_quadratic(Chart, datas):
chart = Chart(interpolate='quadratic')
chart = make_data(chart, datas)
assert chart.render()
def test_lagrange(Chart, datas):
chart = Chart(interpolate='lagrange')
chart = make_data(chart, datas)
assert chart.render()
def test_trigonometric(Chart, datas):
chart = Chart(interpolate='trigonometric')
chart = make_data(chart, datas)
assert chart.render()
def test_hermite(Chart, datas):
chart = Chart(interpolate='hermite')
chart = make_data(chart, datas)
assert chart.render()
def test_hermite_finite(Chart, datas):
chart = Chart(interpolate='hermite',
interpolation_parameters={'type': 'finite_difference'})
chart = make_data(chart, datas)
assert chart.render()
def test_hermite_cardinal(Chart, datas):
chart = Chart(interpolate='hermite',
interpolation_parameters={'type': 'cardinal', 'c': .75})
chart = make_data(chart, datas)
assert chart.render()
def test_hermite_catmull_rom(Chart, datas):
chart = Chart(interpolate='hermite',
interpolation_parameters={'type': 'catmull_rom'})
chart = make_data(chart, datas)
assert chart.render()
def test_hermite_kochanek_bartels(Chart, datas):
chart = Chart(interpolate='hermite',
interpolation_parameters={
'type': 'kochanek_bartels', 'b': -1, 'c': 1, 't': 1})
chart = make_data(chart, datas)
assert chart.render()
chart = Chart(interpolate='hermite',
interpolation_parameters={
'type': 'kochanek_bartels', 'b': -1, 'c': -8, 't': 0})
chart = make_data(chart, datas)
assert chart.render()
chart = Chart(interpolate='hermite',
interpolation_parameters={
'type': 'kochanek_bartels', 'b': 0, 'c': 10, 't': -1})
chart = make_data(chart, datas)
assert chart.render()
| lgpl-3.0 |
jit/pyew | pymsasid/decode.py | 16 | 31254 | # -----------------------------------------------------------------------------
# decode.py
#
# author: [email protected]
# Mainly rewritten from udis86 -- Vivek Mohan <[email protected]>
# -----------------------------------------------------------------------------
from common import DecodeException, VENDOR_INTEL, VENDOR_AMD
from inst import Inst, Operand, Ptr, ie_invalid, ie_pause, ie_nop
# this is intended: hundreds of constants used
from itab import *
from operand import *
# Extracts instruction prefixes.
def get_prefixes(u, inst):
have_pfx = 1
# if in error state, bail out
if u.error:
return -1
# keep going as long as there are prefixes available
i = 0
while have_pfx:
# Get next byte.
u.input.next()
if u.error:
return -1
curr = u.input.current()
# rex prefixes in 64bit mode
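        # A REX prefix is any byte in 0x40-0x4F; its low nibble holds the
        # W, R, X and B extension bits.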
if u.dis_mode == 64 and (curr & 0xF0) == 0x40:
inst.pfx.rex = curr
else:
if curr == 0x2E:
inst.pfx.seg = 'cs'
inst.pfx.rex = 0
elif curr == 0x36:
inst.pfx.seg = 'ss'
inst.pfx.rex = 0
elif curr == 0x3E:
inst.pfx.seg = 'ds'
inst.pfx.rex = 0
elif curr == 0x26:
inst.pfx.seg = 'es'
inst.pfx.rex = 0
elif curr == 0x64:
inst.pfx.seg = 'fs'
inst.pfx.rex = 0
elif curr == 0x65:
inst.pfx.seg = 'gs'
inst.pfx.rex = 0
            elif curr == 0x67: # address-size override prefix
inst.pfx.adr = 0x67
inst.pfx.rex = 0
elif curr == 0xF0:
inst.pfx.lock = 0xF0
inst.pfx.rex = 0
elif curr == 0x66:
# the 0x66 sse prefix is only effective if no other sse prefix
# has already been specified.
if inst.pfx.insn == 0:
inst.pfx.insn = 0x66
inst.pfx.opr = 0x66
inst.pfx.rex = 0
elif curr == 0xF2:
inst.pfx.insn = 0xF2
inst.pfx.repne = 0xF2
inst.pfx.rex = 0
elif curr == 0xF3:
inst.pfx.insn = 0xF3
inst.pfx.rep = 0xF3
inst.pfx.repe = 0xF3
inst.pfx.rex = 0
else:
# No more prefixes
have_pfx = 0
# check if we reached max instruction length
if(i + 1) == MAX_INSN_LENGTH:
u.error = 1
i += 1
# return status
if u.error:
return -1
# rewind back one byte in stream, since the above loop
# stops with a non-prefix byte.
u.input.back()
# speculatively determine the effective operand mode,
# based on the prefixes and the current disassembly
# mode. This may be inaccurate, but useful for mode
# dependent decoding.
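    # For example, with REX.W set the operand size is 64 bits even if a 0x66
    # prefix is present, since REX.W is checked first below.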
if u.dis_mode == 64:
if REX_W(inst.pfx.rex):
inst.opr_mode = 64
elif inst.pfx.opr:
inst.opr_mode = 16
elif(P_DEF64(inst.itab_entry.prefix)):
inst.opr_mode = 64
else:
inst.opr_mode = 32
if inst.pfx.adr:
inst.adr_mode = 32
else:
inst.adr_mode = 64
elif u.dis_mode == 32:
if inst.pfx.opr:
inst.opr_mode = 16
else:
inst.opr_mode = 32
if inst.pfx.adr:
inst.adr_mode = 16
else:
inst.adr_mode = 32
elif u.dis_mode == 16:
if inst.pfx.opr:
inst.opr_mode = 32
else:
inst.opr_mode = 16
if inst.pfx.adr:
inst.adr_mode = 32
else:
inst.adr_mode = 16
return 0
# Searches the instruction tables for the right entry.
def search_itab(u, inst):
# if in state of error, return
did_peek = 0
if u.error:
return -1
# get first byte of opcode
u.input.next()
if u.error:
return -1
curr = u.input.current()
    if curr is None:
inst.itab_entry = ie_invalid
inst.operator = inst.itab_entry.operator
return 0
    # resolve xchg, nop, pause craziness
if 0x90 == curr:
if not(u.dis_mode == 64 and REX_B(inst.pfx.rex)):
if(inst.pfx.rep):
inst.pfx.rep = 0
e = ie_pause
else:
e = ie_nop
inst.itab_entry = e
inst.operator = inst.itab_entry.operator
return 0
# get top-level table
elif 0x0F == curr:
table = ITAB__0F
curr = u.input.next()
if u.error:
return -1
# 2byte opcodes can be modified by 0x66, F3, and F2 prefixes
if 0x66 == inst.pfx.insn:
if itab_list[ITAB__PFX_SSE66__0F][curr].operator != 'invalid':
table = ITAB__PFX_SSE66__0F
inst.pfx.opr = 0
elif 0xF2 == inst.pfx.insn:
if itab_list[ITAB__PFX_SSEF2__0F][curr].operator != 'invalid':
table = ITAB__PFX_SSEF2__0F
inst.pfx.repne = 0
elif 0xF3 == inst.pfx.insn:
if itab_list[ITAB__PFX_SSEF3__0F][curr].operator != 'invalid':
table = ITAB__PFX_SSEF3__0F
inst.pfx.repe = 0
inst.pfx.rep = 0
# pick an instruction from the 1byte table
else:
table = ITAB__1BYTE
index = curr
while True:
e = itab_list[ table ][ index ]
# if operator constant is a standard instruction constant
# our search is over.
if e.operator in operator:
if e.operator == 'invalid':
if did_peek:
u.input.next()
if u.input.error:
raise DecodeException('error')
#return -1
inst.itab_entry = e
inst.operator = inst.itab_entry.operator
return 0
table = e.prefix
if e.operator == 'grp_reg':
peek = u.input.peek()
did_peek = 1
index = MODRM_REG(peek)
elif e.operator == 'grp_mod':
peek = u.input.peek()
did_peek = 1
index = MODRM_MOD(peek)
if index == 3:
index = ITAB__MOD_INDX__11
else:
index = ITAB__MOD_INDX__NOT_11
elif e.operator == 'grp_rm':
curr = u.input.next()
did_peek = 0
if u.error:
return -1
index = MODRM_RM(curr)
elif e.operator == 'grp_x87':
curr = u.input.next()
did_peek = 0
if u.error:
return -1
index = curr - 0xC0
elif e.operator == 'grp_osize':
if inst.opr_mode == 64:
index = ITAB__MODE_INDX__64
elif inst.opr_mode == 32:
index = ITAB__MODE_INDX__32
else:
index = ITAB__MODE_INDX__16
elif e.operator == 'grp_asize':
if inst.adr_mode == 64:
index = ITAB__MODE_INDX__64
elif inst.adr_mode == 32:
index = ITAB__MODE_INDX__32
else:
index = ITAB__MODE_INDX__16
elif e.operator == 'grp_mode':
if u.dis_mode == 64:
index = ITAB__MODE_INDX__64
elif u.dis_mode == 32:
index = ITAB__MODE_INDX__32
else:
index = ITAB__MODE_INDX__16
elif e.operator == 'grp_vendor':
if u.vendor == VENDOR_INTEL:
index = ITAB__VENDOR_INDX__INTEL
elif u.vendor == VENDOR_AMD:
index = ITAB__VENDOR_INDX__AMD
else:
raise DecodeException('unrecognized vendor id')
elif e.operator == 'd3vil':
raise DecodeException('invalid instruction operator constant Id3vil')
else:
raise DecodeException('invalid instruction operator constant')
inst.itab_entry = e
inst.operator = inst.itab_entry.operator
return 0
def resolve_operand_size(u, inst, s):
if s == SZ_V:
return inst.opr_mode
elif s == SZ_Z:
if inst.opr_mode == 16:
return 16
else:
return 32
elif s == SZ_P:
if inst.opr_mode == 16:
return SZ_WP
else:
return SZ_DP
elif s == SZ_MDQ:
if inst.opr_mode == 16:
return 32
else:
return inst.opr_mode
elif s == SZ_RDQ:
if u.dis_mode == 64:
return 64
else:
return 32
else:
return s
def resolve_operator(u, inst):
# far/near flags
inst.branch_dist = None
    # readjust operand sizes for call/jmp instructions
if inst.operator == 'call' or inst.operator == 'jmp':
# WP: 16bit pointer
if inst.operand[0].size == SZ_WP:
inst.operand[0].size = 16
inst.branch_dist = 'far'
# DP: 32bit pointer
elif inst.operand[0].size == SZ_DP:
inst.operand[0].size = 32
inst.branch_dist = 'far'
elif inst.operand[0].size == 8:
inst.branch_dist = 'near'
# resolve 3dnow weirdness
elif inst.operator == '3dnow':
inst.operator = itab_list[ITAB__3DNOW][u.input.current()].operator
# SWAPGS is only valid in 64bits mode
if inst.operator == 'swapgs' and u.dis_mode != 64:
u.error = 1
return -1
return 0
def decode_a(u, inst, op):
"""Decodes operands of the type seg:offset."""
if inst.opr_mode == 16:
# seg16:off16
op.type = 'OP_PTR'
op.size = 32
op.lval = Ptr(u.input.read(16), u.input.read(16))
else:
# seg16:off32
op.type = 'OP_PTR'
op.size = 48
op.lval = Ptr(u.input.read(32), u.input.read(16))
def decode_gpr(u, inst, s, rm):
"""Returns decoded General Purpose Register."""
s = resolve_operand_size(u, inst, s)
if s == 64:
return GPR[64][rm]
elif s == SZ_DP or s == 32:
return GPR[32][rm]
elif s == SZ_WP or s == 16:
return GPR[16][rm]
elif s == 8:
if u.dis_mode == 64 and inst.pfx.rex:
if rm >= 4:
return GPR[8][rm+4]
return GPR[8][rm]
else:
return GPR[8][rm]
else:
return None
def resolve_gpr64(u, inst, gpr_op):
"""64bit General Purpose Register-Selection."""
    if gpr_op in range(OP_rAXr8, OP_rDIr15 + 1):  # include OP_rDIr15
index = (gpr_op - OP_rAXr8) |(REX_B(inst.pfx.rex) << 3)
else:
index = gpr_op - OP_rAX
if inst.opr_mode == 16:
return GPR[16][index]
elif u.dis_mode == 32 or not(inst.opr_mode == 32 and REX_W(inst.pfx.rex) == 0):
return GPR[32][index]
return GPR[64][index]
def resolve_gpr32(u, inst, gpr_op):
"""32bit General Purpose Register-Selection."""
index = gpr_op - OP_eAX
if(inst.opr_mode == 16):
return GPR[16][index]
return GPR[32][index]
def resolve_reg(regtype, i):
"""Resolves the register type."""
return GPR[regtype][i]
def decode_imm(u, inst, s, op):
"""Decodes Immediate values."""
op.size = resolve_operand_size(u, inst, s)
op.type = 'OP_IMM'
op.lval = u.input.read(op.size)
def decode_modrm(u, inst, op, s, rm_type, opreg, reg_size, reg_type):
"""Decodes ModRM Byte."""
u.input.next()
# get mod, r/m and reg fields
mod = MODRM_MOD(u.input.current())
rm = (REX_B(inst.pfx.rex) << 3) | MODRM_RM(u.input.current())
reg = (REX_R(inst.pfx.rex) << 3) | MODRM_REG(u.input.current())
op.size = resolve_operand_size(u, inst, s)
# if mod is 11b, then the m specifies a gpr/mmx/sse/control/debug
if mod == 3:
op.type = 'OP_REG'
if rm_type == 'T_GPR':
op.base = decode_gpr(u, inst, op.size, rm)
else:
op.base = resolve_reg(rm_type, (REX_B(inst.pfx.rex) << 3) |(rm&7))
# else its memory addressing
else:
op.type = 'OP_MEM'
op.seg = inst.pfx.seg
# 64bit addressing
if inst.adr_mode == 64:
op.base = GPR[64][rm]
# get offset type
if mod == 1:
op.offset = 8
elif mod == 2:
op.offset = 32
elif mod == 0 and(rm & 7) == 5:
op.base = 'rip'
op.offset = 32
else:
op.offset = 0
# Scale-Index-Base(SIB)
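            # SIB byte layout: scale = bits 7-6, index = bits 5-3,
            # base = bits 2-0; the encoded effective address is
            # base + index * 2**scale + displacement.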
if rm & 7 == 4:
u.input.next()
op.scale = (1 << SIB_S(u.input.current())) & ~1
op.index = GPR[64][(SIB_I(u.input.current()) |(REX_X(inst.pfx.rex) << 3))]
op.base = GPR[64][(SIB_B(u.input.current()) |(REX_B(inst.pfx.rex) << 3))]
# special conditions for base reference
if op.index == 'rsp':
op.index = None
op.scale = 0
if op.base == 'rbp' or op.base == 'r13':
if mod == 0:
op.base = None
if mod == 1:
op.offset = 8
else:
op.offset = 32
# 32-Bit addressing mode
elif inst.adr_mode == 32:
# get base
op.base = GPR[16][rm]
# get offset type
if mod == 1:
op.offset = 8
elif mod == 2:
op.offset = 32
elif mod == 0 and rm == 5:
op.base = None
op.offset = 32
else:
op.offset = 0
# Scale-Index-Base(SIB)
if(rm & 7) == 4:
u.input.next()
op.scale = (1 << SIB_S(u.input.current())) & ~1
op.index = GPR[32][SIB_I(u.input.current()) |(REX_X(inst.pfx.rex) << 3)]
op.base = GPR[32][SIB_B(u.input.current()) |(REX_B(inst.pfx.rex) << 3)]
if op.index == 'esp':
op.index = None
op.scale = 0
# special condition for base reference
if op.base == 'ebp':
if mod == 0:
op.base = None
if mod == 1:
op.offset = 8
else:
op.offset = 32
# 16bit addressing mode
else:
if rm == 0:
op.base = 'bx'
op.index = 'si'
elif rm == 1:
op.base = 'bx'
op.index = 'di'
elif rm == 2:
op.base = 'bp'
op.index = 'si'
elif rm == 3:
op.base = 'bp'
op.index = 'di'
elif rm == 4:
op.base = 'si'
elif rm == 5:
op.base = 'di'
elif rm == 6:
op.base = 'bp'
elif rm == 7:
op.base = 'bx'
if mod == 0 and rm == 6:
op.offset = 16
op.base = None
elif mod == 1:
op.offset = 8
elif mod == 2:
op.offset = 16
# extract offset, if any
if op.offset in [8, 16, 32, 64]:
op.lval = u.input.read(op.offset)
bound = pow(2, op.offset - 1)
if op.lval > bound:
op.lval = -(((2 * bound) - op.lval) % bound)
# resolve register encoded in reg field
if opreg:
opreg.type = 'OP_REG'
opreg.size = resolve_operand_size(u, inst, reg_size)
if reg_type == 'T_GPR':
opreg.base = decode_gpr(u, inst, opreg.size, reg)
else:
opreg.base = resolve_reg(reg_type, reg)
def decode_o(u, inst, s, op):
"""Decodes offset."""
op.seg = inst.pfx.seg
op.offset = inst.adr_mode
op.lval = u.input.read(inst.adr_mode)
op.type = 'OP_MEM'
op.size = resolve_operand_size(u, inst, s)
def disasm_operands(u, inst):
"""Disassembles Operands."""
# get type
def get_mopt(x): return x.type
mopt = map(get_mopt, inst.itab_entry.operand)
# get size
def get_mops(x): return x.size
mops = map(get_mops, inst.itab_entry.operand)
if mopt[2] != OP_NONE:
inst.operand = [Operand(), Operand(), Operand()]
elif mopt[1] != OP_NONE:
inst.operand = [Operand(), Operand()]
elif mopt[0] != OP_NONE:
inst.operand = [Operand()]
# iop = instruction operand
#iop = inst.operand
if mopt[0] == OP_A:
decode_a(u, inst, inst.operand[0])
# M[b] ...
# E, G/P/V/I/CL/1/S
elif mopt[0] == OP_M or mopt[0] == OP_E:
if mopt[0] == OP_M and MODRM_MOD(u.input.peek()) == 3:
u.error = 1
if mopt[1] == OP_G:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_GPR')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
elif mopt[2] == OP_CL:
inst.operand[2].type = 'OP_REG'
inst.operand[2].base = 'cl'
inst.operand[2].size = 8
elif mopt[1] == OP_P:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_MMX')
elif mopt[1] == OP_V:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_XMM')
elif mopt[1] == OP_S:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_SEG')
else:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', NULL, 0, 'T_NONE')
if mopt[1] == OP_CL:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = 'cl'
inst.operand[1].size = 8
elif mopt[1] == OP_I1:
inst.operand[1].type = 'OP_IMM'
inst.operand[1].lval = 1
elif mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
# G, E/PR[,I]/VR
elif mopt[0] == OP_G:
if mopt[1] == OP_M:
if MODRM_MOD(u.input.peek()) == 3:
u.error = 1
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_GPR')
elif mopt[1] == OP_E:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_GPR')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
elif mopt[1] == OP_PR:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_GPR')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
elif mopt[1] == OP_VR:
if MODRM_MOD(u.input.peek()) != 3:
u.error = 1
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_GPR')
elif mopt[1] == OP_W:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_GPR')
# AL..BH, I/O/DX
elif mopt[0] in [OP_AL, OP_CL, OP_DL, OP_BL,
OP_AH, OP_CH, OP_DH, OP_BH]:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = GPR[8][mopt[0] - OP_AL]
inst.operand[0].size = 8
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
elif mopt[1] == OP_DX:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = 'dx'
inst.operand[1].size = 16
elif mopt[1] == OP_O:
decode_o(u, inst, mops[1], inst.operand[1])
# rAX[r8]..rDI[r15], I/rAX..rDI/O
elif mopt[0] in [OP_rAXr8, OP_rCXr9, OP_rDXr10, OP_rBXr11,
OP_rSPr12, OP_rBPr13, OP_rSIr14, OP_rDIr15,
OP_rAX, OP_rCX, OP_rDX, OP_rBX,
OP_rSP, OP_rBP, OP_rSI, OP_rDI]:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = resolve_gpr64(u, inst, mopt[0])
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
elif mopt[1] in [OP_rAX, OP_rCX, OP_rDX, OP_rBX,
OP_rSP, OP_rBP, OP_rSI, OP_rDI]:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = resolve_gpr64(u, inst, mopt[1])
elif mopt[1] == OP_O:
decode_o(u, inst, mops[1], inst.operand[1])
inst.operand[0].size = resolve_operand_size(u, inst, mops[1])
elif mopt[0] in [OP_ALr8b, OP_CLr9b, OP_DLr10b, OP_BLr11b,
OP_AHr12b, OP_CHr13b, OP_DHr14b, OP_BHr15b]:
        gpr = (mopt[0] - OP_ALr8b) + (REX_B(inst.pfx.rex) << 3)
        # With a REX prefix present, byte-register indices 4..7 select
        # spl..dil rather than ah..bh, so skip the index past ah..bh
        # (the REX.B range then lands on r8b..r15b).
        if gpr >= 4 and inst.pfx.rex:
            gpr = gpr + 4
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = GPR[8][gpr]
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
# eAX..eDX, DX/I
elif mopt[0] in [OP_eAX, OP_eCX, OP_eDX, OP_eBX,
OP_eSP, OP_eBP, OP_eSI, OP_eDI]:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = resolve_gpr32(u, inst, mopt[0])
if mopt[1] == OP_DX:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = 'dx'
inst.operand[1].size = 16
elif mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
# ES..GS
elif mopt[0] in [OP_ES, OP_CS, OP_DS,
OP_SS, OP_FS, OP_GS]:
# in 64bits mode, only fs and gs are allowed
if u.dis_mode == 64:
if mopt[0] != OP_FS and mopt[0] != OP_GS:
u.error = 1
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = GPR['T_SEG'][mopt[0] - OP_ES]
inst.operand[0].size = 16
# J
elif mopt[0] == OP_J:
decode_imm(u, inst, mops[0], inst.operand[0])
        # Relative jump targets are signed: sign-extend the immediate
        # (e.g. an 8-bit operand 0xf0 decodes as -16).
        bound = pow(2, inst.operand[0].size - 1)
        if inst.operand[0].lval >= bound:
            inst.operand[0].lval -= 2 * bound
inst.operand[0].type = 'OP_JIMM'
# PR, I
elif mopt[0] == OP_PR:
if MODRM_MOD(u.input.peek()) != 3:
u.error = 1
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_MMX', NULL, 0, 'T_NONE')
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
# VR, I
elif mopt[0] == OP_VR:
if MODRM_MOD(u.input.peek()) != 3:
u.error = 1
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_XMM', NULL, 0, 'T_NONE')
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
# P, Q[,I]/W/E[,I],VR
elif mopt[0] == OP_P:
if mopt[1] == OP_Q:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_MMX')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
elif mopt[1] == OP_W:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_MMX')
elif mopt[1] == OP_VR:
if MODRM_MOD(u.input.peek()) != 3:
u.error = 1
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_MMX')
elif mopt[1] == OP_E:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_MMX')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
# R, C/D
elif mopt[0] == OP_R:
if mopt[1] == OP_C:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_CRG')
elif mopt[1] == OP_D:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_DBG')
# C, R
elif mopt[0] == OP_C:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_CRG')
# D, R
elif mopt[0] == OP_D:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_DBG')
# Q, P
elif mopt[0] == OP_Q:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_MMX', inst.operand[1], mops[1], 'T_MMX')
# S, E
elif mopt[0] == OP_S:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_SEG')
# W, V
elif mopt[0] == OP_W:
decode_modrm(u, inst, inst.operand[0], mops[0], 'T_XMM', inst.operand[1], mops[1], 'T_XMM')
# V, W[,I]/Q/M/E
elif mopt[0] == OP_V:
if mopt[1] == OP_W:
# special cases for movlps and movhps
if MODRM_MOD(u.input.peek()) == 3:
if inst.operator == 'movlps':
inst.operator = 'movhlps'
elif inst.operator == 'movhps':
inst.operator = 'movlhps'
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_XMM')
if mopt[2] == OP_I:
decode_imm(u, inst, mops[2], inst.operand[2])
elif mopt[1] == OP_Q:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_XMM')
elif mopt[1] == OP_M:
if MODRM_MOD(u.input.peek()) == 3:
u.error = 1
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_XMM')
elif mopt[1] == OP_E:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_XMM')
elif mopt[1] == OP_PR:
decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_XMM')
# DX, eAX/AL
elif mopt[0] == OP_DX:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = 'dx'
inst.operand[0].size = 16
if mopt[1] == OP_eAX:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])
elif mopt[1] == OP_AL:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = 'al'
inst.operand[1].size = 8
# I, I/AL/eAX
elif mopt[0] == OP_I:
decode_imm(u, inst, mops[0], inst.operand[0])
if mopt[1] == OP_I:
decode_imm(u, inst, mops[1], inst.operand[1])
elif mopt[1] == OP_AL:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = 'al'
inst.operand[1].size = 16
elif mopt[1] == OP_eAX:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])
# O, AL/eAX
elif mopt[0] == OP_O:
decode_o(u, inst, mops[0], inst.operand[0])
inst.operand[1].type = 'OP_REG'
inst.operand[1].size = resolve_operand_size(u, inst, mops[0])
if mopt[1] == OP_AL:
inst.operand[1].base = 'al'
elif mopt[1] == OP_eAX:
inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])
elif mopt[1] == OP_rAX:
inst.operand[1].base = resolve_gpr64(u, inst, mopt[1])
# 3
elif mopt[0] == OP_I3:
inst.operand[0].type = 'OP_IMM'
inst.operand[0].lval = 3
# ST(n), ST(n)
elif mopt[0] in [OP_ST0, OP_ST1, OP_ST2, OP_ST3,
OP_ST4, OP_ST5, OP_ST6, OP_ST7]:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = GPR['T_ST'][mopt[0] - OP_ST0]
inst.operand[0].size = 0
if mopt[1] in [OP_ST0, OP_ST1, OP_ST2, OP_ST3,
OP_ST4, OP_ST5, OP_ST6, OP_ST7]:
inst.operand[1].type = 'OP_REG'
inst.operand[1].base = GPR['T_ST'][mopt[1] - OP_ST0]
inst.operand[1].size = 0
# AX
elif mopt[0] == OP_AX:
inst.operand[0].type = 'OP_REG'
inst.operand[0].base = 'ax'
inst.operand[0].size = 16
# none
else:
for op in inst.operand:
op.type = None
return 0
def do_mode(u, inst):
# if in error state, bail out
if u.error:
return -1
    # propagate prefix effects
if u.dis_mode == 64: # set 64bit-mode flags
# Check validity of instruction m64
if P_INV64(inst.itab_entry.prefix):
u.error = 1
return -1
# effective rex prefix is the effective mask for the
# instruction hard-coded in the opcode map.
inst.pfx.rex = ((inst.pfx.rex & 0x40)
|(inst.pfx.rex & REX_PFX_MASK(inst.itab_entry.prefix)))
# calculate effective operand size
if REX_W(inst.pfx.rex) or P_DEF64(inst.itab_entry.prefix):
inst.opr_mode = 64
elif inst.pfx.opr:
inst.opr_mode = 16
else:
inst.opr_mode = 32
# calculate effective address size
if inst.pfx.adr:
inst.adr_mode = 32
else:
inst.adr_mode = 64
elif u.dis_mode == 32: # set 32bit-mode flags
if inst.pfx.opr:
inst.opr_mode = 16
else:
inst.opr_mode = 32
if inst.pfx.adr:
inst.adr_mode = 16
else:
inst.adr_mode = 32
elif u.dis_mode == 16: # set 16bit-mode flags
if inst.pfx.opr:
inst.opr_mode = 32
else:
inst.opr_mode = 16
if inst.pfx.adr:
inst.adr_mode = 32
else:
inst.adr_mode = 16
# These flags determine which operand to apply the operand size
# cast to.
cast = [P_C0, P_C1, P_C2]
for i in range(len(inst.operand)):
inst.operand[i].cast = cast[i](inst.itab_entry.prefix)
return 0
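# Effective-size examples (illustrative): in 64-bit mode a 0x66 operand-size
# prefix selects opr_mode == 16 and REX.W (or a P_DEF64 opcode) forces 64,
# with 32 as the default; a 0x67 address-size prefix drops adr_mode from the
# default 64 down to 32.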
def decode(self):
"""Instruction decoder. Returns the number of bytes decoded."""
inst = Inst(myInput = self.input, add = self.pc, mode = self.dis_mode, syntax = self.syntax)
self.error = 0
self.input.start ()
if get_prefixes(self, inst) != 0:
pass # ;print('prefixes error') # error
elif search_itab(self, inst) != 0:
pass #; print('itab error') # error
elif do_mode(self, inst) != 0:
pass #; print('mode error') # error
elif disasm_operands(self, inst) != 0:
pass #; print('operand error') # error
elif resolve_operator(self, inst) != 0:
pass #; print('operator error') # error
# Handle decode error.
if self.error:
inst.clear()
inst.size = self.input.ctr + 1
inst.raw = self.input.buffer[0:inst.size]
inst.set_pc(inst.add + inst.size)
return inst
| gpl-2.0 |
jemofthewest/mykoans | python2/libs/colorama/win32.py | 86 | 2730 |
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort
)
handles = {
STDOUT: windll.kernel32.GetStdHandle(STDOUT),
STDERR: windll.kernel32.GetStdHandle(STDERR),
}
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
"""struct in wincon.h"""
_fields_ = [
('X', SHORT),
('Y', SHORT),
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def GetConsoleScreenBufferInfo(stream_id):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
# This fails when imported via setup.py when installing using 'pip'
# presumably the fix is that running setup.py should not trigger all
# this activity.
# assert success
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
assert success
def SetConsoleCursorPosition(stream_id, position):
handle = handles[stream_id]
position = COORD(*position)
success = windll.kernel32.SetConsoleCursorPosition(handle, position)
assert success
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = TCHAR(char)
length = DWORD(length)
start = COORD(*start)
num_written = DWORD(0)
# AttributeError: function 'FillConsoleOutputCharacter' not found
# could it just be that my types are wrong?
success = windll.kernel32.FillConsoleOutputCharacter(
handle, char, length, start, byref(num_written))
assert success
return num_written.value
if __name__ == '__main__':
x = GetConsoleScreenBufferInfo(STDOUT)
print(x.dwSize)
print(x.dwCursorPosition)
print(x.wAttributes)
print(x.srWindow)
print(x.dwMaximumWindowSize)
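    # Illustrative round-trip: re-apply the attributes that were just read
    # back to the console (harmless, and Windows-only like everything above).
    SetConsoleTextAttribute(STDOUT, x.wAttributes)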
| mit |
ztrautt/tutorials | TEM-image-simple/mdcs/explore.py | 10 | 1530 | #! /usr/bin/env python
import requests
from collections import OrderedDict
def select_all(host,user,pswd,cert=None,format=None):
url = host + "/rest/explore/select/all"
params = dict()
if format: params['dataformat'] = format
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return r.json(object_pairs_hook=OrderedDict)
def select(host,user,pswd,cert=None,format=None,ID=None,template=None,title=None):
url = host + "/rest/explore/select"
params = dict()
if format: params['dataformat'] = format
if ID: params['id'] = ID
if template: params['schema'] = template
if title: params['title'] = title
r = requests.get(url, params=params, auth=(user, pswd), verify=cert)
return r.json(object_pairs_hook=OrderedDict)
def delete(ID,host,user,pswd,cert=None):
url = host + "/rest/explore/delete"
params = dict()
params['id']=ID
r = requests.delete(url, params=params, auth=(user, pswd), verify=cert)
if int(r.status_code)==204:
return "Successful deletion of: "+ID
else:
return r.json()
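def _demo(): # pragma: no cover
    """Minimal usage sketch. The host and credentials are placeholders, a
    live MDCS instance is assumed, and records are assumed to be JSON objects."""
    host, user, pswd = "https://mdcs.example.org", "admin", "secret"
    for record in select_all(host, user, pswd, format="json"):
        print(record.get("title"))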
def query(host,user,pswd,cert=None,format=None,query=None,repositories=None):
url = host + "/rest/explore/query-by-example"
data = dict()
if format: data['dataformat'] = format
if query: data['query'] = query
if repositories: data['repositories'] = repositories
r = requests.post(url, data=data, auth=(user, pswd), verify=cert)
return r.json(object_pairs_hook=OrderedDict) | cc0-1.0 |
HBehrens/feedsanitizer | django/contrib/gis/utils/wkt.py | 419 | 1846 | """
Utilities for manipulating Geometry WKT.
"""
def precision_wkt(geom, prec):
"""
Returns WKT text of the geometry according to the given precision (an
integer or a string). If the precision is an integer, then the decimal
places of coordinates WKT will be truncated to that number:
>>> pnt = Point(5, 23)
>>> pnt.wkt
'POINT (5.0000000000000000 23.0000000000000000)'
    >>> precision_wkt(pnt, 1)
    'POINT(5.0 23.0)'
If the precision is a string, it must be valid Python format string
(e.g., '%20.7f') -- thus, you should know what you're doing.
"""
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, basestring):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join([coord_fmt % c[:2] for c in coords])
def formatted_poly(poly):
return ','.join(['(%s)' % formatted_coords(r) for r in poly])
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join(['(%s)' % formatted_poly(p) for p in g])
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
else:
raise TypeError
yield ')'
return ''.join([wkt for wkt in formatted_geom(geom)])
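# Minimal usage sketch (assumes the GEOS library is available through
# django.contrib.gis.geos; note there is no space after 'POINT' in the output):
if __name__ == '__main__':
    from django.contrib.gis.geos import Point
    print(precision_wkt(Point(5, 23), 2))       # POINT(5.00 23.00)
    print(precision_wkt(Point(5, 23), '%.1f'))  # POINT(5.0 23.0)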
| mit |
manumathewthomas/Chat-with-Joey | chatbot/chatbot.py | 1 | 31681 | # Copyright 2015 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Main script. See README.md for more information
Use python 3
"""
import argparse # Command line parsing
import configparser # Saving the models parameters
import datetime # Chronometer
import os # Files management
import tensorflow as tf
import numpy as np
import math
from tqdm import tqdm # Progress bar
from tensorflow.python import debug as tf_debug
from chatbot.textdata import TextData
from chatbot.model import Model
class Chatbot:
"""
Main class which launch the training or testing mode
"""
class TestMode:
""" Simple structure representing the different testing modes
"""
ALL = 'all'
INTERACTIVE = 'interactive' # The user can write his own questions
DAEMON = 'daemon' # The chatbot runs on background and can regularly be called to predict something
def __init__(self):
"""
"""
# Model/dataset parameters
self.args = None
# Task specific object
self.textData = None # Dataset
self.model = None # Sequence to sequence model
# Tensorflow utilities for convenience saving/logging
self.writer = None
self.saver = None
self.modelDir = '' # Where the model is saved
self.globStep = 0 # Represent the number of iteration for the current model
# TensorFlow main session (we keep track for the daemon)
self.sess = None
# Filename and directories constants
self.MODEL_DIR_BASE = 'save/model'
self.MODEL_NAME_BASE = 'model'
self.MODEL_EXT = '.ckpt'
self.CONFIG_FILENAME = 'params.ini'
self.CONFIG_VERSION = '0.4'
self.TEST_IN_NAME = 'data/test/samples.txt'
self.TEST_OUT_SUFFIX = '_predictions.txt'
self.SENTENCES_PREFIX = ['Q: ', 'A: ']
@staticmethod
def parseArgs(args):
"""
Parse the arguments from the given command line
Args:
args (list<str>): List of arguments to parse. If None, the default sys.argv will be parsed
"""
parser = argparse.ArgumentParser()
# Global options
globalArgs = parser.add_argument_group('Global options')
globalArgs.add_argument('--test',
nargs='?',
choices=[Chatbot.TestMode.ALL, Chatbot.TestMode.INTERACTIVE, Chatbot.TestMode.DAEMON],
const=Chatbot.TestMode.ALL, default=None,
                                help='if present, launch the program to try to answer all sentences from data/test/ with'
                                     ' the defined model(s); in interactive mode, the user can write their own sentences;'
                                     ' use daemon mode to integrate the chatbot in another program')
globalArgs.add_argument('--createDataset', action='store_true', help='if present, the program will only generate the dataset from the corpus (no training/testing)')
        globalArgs.add_argument('--playDataset', type=int, nargs='?', const=10, default=None, help='if set, the program will randomly play some samples (can be used together with createDataset if this is the only action you want to perform)')
        globalArgs.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present in the model directory (Warning: the model will be destroyed with all the folder content)')
globalArgs.add_argument('--verbose', action='store_true', help='When testing, will plot the outputs at the same time they are computed')
globalArgs.add_argument('--debug', action='store_true', help='run DeepQA with Tensorflow debug mode. Read TF documentation for more details on this.')
globalArgs.add_argument('--keepAll', action='store_true', help='If this option is set, all saved model will be kept (Warning: make sure you have enough free disk space or increase saveEvery)') # TODO: Add an option to delimit the max size
globalArgs.add_argument('--modelTag', type=str, default=None, help='tag to differentiate which model to store/load')
globalArgs.add_argument('--rootDir', type=str, default=None, help='folder where to look for the models and data')
        globalArgs.add_argument('--watsonMode', action='store_true', help='Invert the questions and answers when training (the network tries to guess the question)')
globalArgs.add_argument('--autoEncode', action='store_true', help='Randomly pick the question or the answer and use it both as input and output')
        globalArgs.add_argument('--device', type=str, default=None, help='\'gpu\' or \'cpu\' (Warning: make sure you have enough free RAM), allows choosing the hardware on which to run the model')
globalArgs.add_argument('--seed', type=int, default=None, help='random seed for replication')
# Dataset options
datasetArgs = parser.add_argument_group('Dataset options')
datasetArgs.add_argument('--corpus', choices=TextData.corpusChoices(), default=TextData.corpusChoices()[0], help='corpus on which extract the dataset.')
datasetArgs.add_argument('--datasetTag', type=str, default='', help='add a tag to the dataset (file where to load the vocabulary and the precomputed samples, not the original corpus). Useful to manage multiple versions. Also used to define the file used for the lightweight format.') # The samples are computed from the corpus if it does not exist already. There are saved in \'data/samples/\'
datasetArgs.add_argument('--ratioDataset', type=float, default=1.0, help='ratio of dataset used to avoid using the whole dataset') # Not implemented, useless ?
datasetArgs.add_argument('--maxLength', type=int, default=10, help='maximum length of the sentence (for input and output), define number of maximum step of the RNN')
datasetArgs.add_argument('--lightweightFile', type=str, default=None, help='file containing our lightweight-formatted corpus')
# Network options (Warning: if modifying something here, also make the change on save/loadParams() )
nnArgs = parser.add_argument_group('Network options', 'architecture related option')
nnArgs.add_argument('--hiddenSize', type=int, default=256, help='number of hidden units in each RNN cell')
nnArgs.add_argument('--numLayers', type=int, default=2, help='number of rnn layers')
nnArgs.add_argument('--embeddingSize', type=int, default=32, help='embedding size of the word representation')
nnArgs.add_argument('--initEmbeddings', action='store_true', help='if present, the program will initialize the embeddings with pre-trained word2vec vectors')
nnArgs.add_argument('--softmaxSamples', type=int, default=0, help='Number of samples in the sampled softmax loss function. A value of 0 deactivates sampled softmax')
# Training options
trainingArgs = parser.add_argument_group('Training options')
trainingArgs.add_argument('--numEpochs', type=int, default=30, help='maximum number of epochs to run')
trainingArgs.add_argument('--saveEvery', type=int, default=1000, help='nb of mini-batch step before creating a model checkpoint')
trainingArgs.add_argument('--batchSize', type=int, default=10, help='mini-batch size')
trainingArgs.add_argument('--learningRate', type=float, default=0.001, help='Learning rate')
return parser.parse_args(args)
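        # Typical invocations through the project's run script (illustrative;
        # the model tag name is a placeholder):
        #   python main.py --modelTag my_model --numEpochs 50
        #   python main.py --test interactive --modelTag my_model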
def main(self, args=None):
"""
Launch the training and/or the interactive mode
"""
print('Welcome to DeepQA v0.1 !')
print()
print('TensorFlow detected: v{}'.format(tf.__version__))
# General initialisation
self.args = self.parseArgs(args)
if not self.args.rootDir:
self.args.rootDir = os.getcwd() # Use the current working directory
#tf.logging.set_verbosity(tf.logging.INFO) # DEBUG, INFO, WARN (default), ERROR, or FATAL
self.loadModelParams() # Update the self.modelDir and self.globStep, for now, not used when loading Model (but need to be called before _getSummaryName)
self.textData = TextData(self.args)
# TODO: Add a mode where we can force the input of the decoder // Try to visualize the predictions for
# each word of the vocabulary / decoder input
# TODO: For now, the model are trained for a specific dataset (because of the maxLength which define the
# vocabulary). Add a compatibility mode which allow to launch a model trained on a different vocabulary (
# remap the word2id/id2word variables).
if self.args.createDataset:
print('Dataset created! Thanks for using this program')
return # No need to go further
# Prepare the model
with tf.device(self.getDevice()):
self.model = Model(self.args, self.textData)
# Saver/summaries
self.writer = tf.summary.FileWriter(self._getSummaryName())
self.saver = tf.train.Saver(max_to_keep=200, write_version=tf.train.SaverDef.V1) # TODO: See GitHub for format name issue (when restoring the model)
        # TODO: Fixed seed (WARNING: if the dataset is shuffled, make sure it happens after saving the
        # dataset; otherwise everything that comes after the shuffling won't be replicable when
        # reloading the dataset). How do we restore the seed after loading?
        # Also fix the seed for random.shuffle (does it work globally for all files?)
# Running session
self.sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, # Allows backup device for non GPU-available operations (when forcing GPU)
log_device_placement=False) # Too verbose ?
) # TODO: Replace all sess by self.sess (not necessary a good idea) ?
if self.args.debug:
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
print('Initialize variables...')
self.sess.run(tf.global_variables_initializer())
        # Reload the model if it exists; in testing mode, the models are not loaded here (but in predictTestset)
if self.args.test != Chatbot.TestMode.ALL:
self.managePreviousModel(self.sess)
# Initialize embeddings with pre-trained word2vec vectors
if self.args.initEmbeddings:
print("Loading pre-trained embeddings from GoogleNews-vectors-negative300.bin")
self.loadEmbedding(self.sess)
if self.args.test:
if self.args.test == Chatbot.TestMode.INTERACTIVE:
self.mainTestInteractive(self.sess)
elif self.args.test == Chatbot.TestMode.ALL:
print('Start predicting...')
self.predictTestset(self.sess)
print('All predictions done')
elif self.args.test == Chatbot.TestMode.DAEMON:
print('Daemon mode, running in background...')
else:
raise RuntimeError('Unknown test mode: {}'.format(self.args.test)) # Should never happen
else:
self.mainTrain(self.sess)
if self.args.test != Chatbot.TestMode.DAEMON:
self.sess.close()
print("The End! Thanks for using this program")
def mainTrain(self, sess):
""" Training loop
Args:
sess: The current running session
"""
# Specific training dependent loading
self.textData.makeLighter(self.args.ratioDataset) # Limit the number of training samples
mergedSummaries = tf.summary.merge_all() # Define the summary operator (Warning: Won't appear on the tensorboard graph)
if self.globStep == 0: # Not restoring from previous run
self.writer.add_graph(sess.graph) # First time only
# If restoring a model, restore the progression bar ? and current batch ?
print('Start training (press Ctrl+C to save and exit)...')
try: # If the user exit while training, we still try to save the model
for e in range(self.args.numEpochs):
print()
print("----- Epoch {}/{} ; (lr={}) -----".format(e+1, self.args.numEpochs, self.args.learningRate))
batches = self.textData.getBatches()
# TODO: Also update learning parameters eventually
tic = datetime.datetime.now()
for nextBatch in tqdm(batches, desc="Training"):
# Training pass
ops, feedDict = self.model.step(nextBatch)
assert len(ops) == 2 # training, loss
_, loss, summary = sess.run(ops + (mergedSummaries,), feedDict)
self.writer.add_summary(summary, self.globStep)
self.globStep += 1
# Output training status
if self.globStep % 100 == 0:
perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
tqdm.write("----- Step %d -- Loss %.2f -- Perplexity %.2f" % (self.globStep, loss, perplexity))
# Checkpoint
if self.globStep % self.args.saveEvery == 0:
self._saveSession(sess)
toc = datetime.datetime.now()
print("Epoch finished in {}".format(toc-tic)) # Warning: Will overflow if an epoch takes more than 24 hours, and the output isn't really nicer
except (KeyboardInterrupt, SystemExit): # If the user press Ctrl+C while testing progress
print('Interruption detected, exiting the program...')
self._saveSession(sess) # Ultimate saving before complete exit
def predictTestset(self, sess):
""" Try predicting the sentences from the samples.txt file.
The sentences are saved on the modelDir under the same name
Args:
sess: The current running session
"""
# Loading the file to predict
with open(os.path.join(self.args.rootDir, self.TEST_IN_NAME), 'r') as f:
lines = f.readlines()
modelList = self._getModelList()
if not modelList:
print('Warning: No model found in \'{}\'. Please train a model before trying to predict'.format(self.modelDir))
return
# Predicting for each model present in modelDir
for modelName in sorted(modelList): # TODO: Natural sorting
print('Restoring previous model from {}'.format(modelName))
self.saver.restore(sess, modelName)
print('Testing...')
saveName = modelName[:-len(self.MODEL_EXT)] + self.TEST_OUT_SUFFIX # We remove the model extension and add the prediction suffix
with open(saveName, 'w') as f:
nbIgnored = 0
for line in tqdm(lines, desc='Sentences'):
question = line[:-1] # Remove the endl character
answer = self.singlePredict(question)
if not answer:
nbIgnored += 1
continue # Back to the beginning, try again
predString = '{x[0]}{0}\n{x[1]}{1}\n\n'.format(question, self.textData.sequence2str(answer, clean=True), x=self.SENTENCES_PREFIX)
if self.args.verbose:
tqdm.write(predString)
f.write(predString)
print('Prediction finished, {}/{} sentences ignored (too long)'.format(nbIgnored, len(lines)))
def mainTestInteractive(self, sess):
""" Try predicting the sentences that the user will enter in the console
Args:
sess: The current running session
"""
# TODO: If verbose mode, also show similar sentences from the training set with the same words (include in mainTest also)
# TODO: Also show the top 10 most likely predictions for each predicted output (when verbose mode)
# TODO: Log the questions asked for latter re-use (merge with test/samples.txt)
print('Testing: Launch interactive mode:')
print('')
        print('Welcome to the interactive mode, here you can ask Deep Q&A any sentence you want. Don\'t have high '
              'expectations. Type \'exit\' or just press ENTER to quit the program. Have fun.')
while True:
question = input(self.SENTENCES_PREFIX[0])
if question == '' or question == 'exit':
break
            questionSeq = []  # Will contain the question as seen by the encoder
answer = self.singlePredict(question, questionSeq)
if not answer:
print('Warning: sentence too long, sorry. Maybe try a simpler sentence.')
continue # Back to the beginning, try again
print('{}{}'.format(self.SENTENCES_PREFIX[1], self.textData.sequence2str(answer, clean=True)))
if self.args.verbose:
print(self.textData.batchSeq2str(questionSeq, clean=True, reverse=True))
print(self.textData.sequence2str(answer))
print()
def singlePredict(self, question, questionSeq=None):
""" Predict the sentence
Args:
question (str): the raw input sentence
questionSeq (List<int>): output argument. If given will contain the input batch sequence
Return:
list <int>: the word ids corresponding to the answer
"""
# Create the input batch
batch = self.textData.sentence2enco(question)
if not batch:
return None
if questionSeq is not None: # If the caller want to have the real input
questionSeq.extend(batch.encoderSeqs)
# Run the model
ops, feedDict = self.model.step(batch)
output = self.sess.run(ops[0], feedDict) # TODO: Summarize the output too (histogram, ...)
answer = self.textData.deco2sentence(output)
return answer
def daemonPredict(self, sentence):
""" Return the answer to a given sentence (same as singlePredict() but with additional cleaning)
Args:
sentence (str): the raw input sentence
Return:
str: the human readable sentence
"""
return self.textData.sequence2str(
self.singlePredict(sentence),
clean=True
)
def daemonClose(self):
""" A utility function to close the daemon when finish
"""
print('Exiting the daemon mode...')
self.sess.close()
print('Daemon closed.')
def loadEmbedding(self, sess):
""" Initialize embeddings with pre-trained word2vec vectors
Will modify the embedding weights of the current loaded model
Uses the GoogleNews pre-trained values (path hardcoded)
"""
# Fetch embedding variables from model
with tf.variable_scope("embedding_rnn_seq2seq/RNN/EmbeddingWrapper", reuse=True):
em_in = tf.get_variable("embedding")
with tf.variable_scope("embedding_rnn_seq2seq/embedding_rnn_decoder", reuse=True):
em_out = tf.get_variable("embedding")
# Disable training for embeddings
variables = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
variables.remove(em_in)
variables.remove(em_out)
# If restoring a model, we can leave here
if self.globStep != 0:
return
# New model, we load the pre-trained word2vec data and initialize embeddings
with open(os.path.join(self.args.rootDir, 'data/word2vec/GoogleNews-vectors-negative300.bin'), "rb", 0) as f:
header = f.readline()
vocab_size, vector_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * vector_size
initW = np.random.uniform(-0.25,0.25,(len(self.textData.word2id), vector_size))
for line in tqdm(range(vocab_size)):
word = []
while True:
ch = f.read(1)
if ch == b' ':
word = b''.join(word).decode('utf-8')
break
if ch != b'\n':
word.append(ch)
if word in self.textData.word2id:
initW[self.textData.word2id[word]] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
# PCA Decomposition to reduce word2vec dimensionality
if self.args.embeddingSize < vector_size:
U, s, Vt = np.linalg.svd(initW, full_matrices=False)
S = np.zeros((vector_size, vector_size), dtype=complex)
S[:vector_size, :vector_size] = np.diag(s)
initW = np.dot(U[:, :self.args.embeddingSize], S[:self.args.embeddingSize, :self.args.embeddingSize])
# Initialize input and output embeddings
sess.run(em_in.assign(initW))
sess.run(em_out.assign(initW))
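        # Note: the SVD above projects the 300-d word2vec vectors onto their
        # top `embeddingSize` singular directions (scaled by the singular
        # values), so the pre-trained vectors fit the smaller embedding matrix.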
def managePreviousModel(self, sess):
""" Restore or reset the model, depending of the parameters
If the destination directory already contains some file, it will handle the conflict as following:
* If --reset is set, all present files will be removed (warning: no confirmation is asked) and the training
restart from scratch (globStep & cie reinitialized)
* Otherwise, it will depend of the directory content. If the directory contains:
* No model files (only summary logs): works as a reset (restart from scratch)
* Other model files, but modelName not found (surely keepAll option changed): raise error, the user should
decide by himself what to do
* The right model file (eventually some other): no problem, simply resume the training
In any case, the directory will exist as it has been created by the summary writer
Args:
sess: The current running session
"""
print('WARNING: ', end='')
modelName = self._getModelName()
if os.listdir(self.modelDir):
if self.args.reset:
print('Reset: Destroying previous model at {}'.format(self.modelDir))
# Analysing directory content
elif os.path.exists(modelName): # Restore the model
print('Restoring previous model from {}'.format(modelName))
self.saver.restore(sess, modelName) # Will crash when --reset is not activated and the model has not been saved yet
elif self._getModelList():
print('Conflict with previous models.')
raise RuntimeError('Some models are already present in \'{}\'. You should check them first (or re-try with the keepAll flag)'.format(self.modelDir))
else: # No other model to conflict with (probably summary files)
print('No previous model found, but some files found at {}. Cleaning...'.format(self.modelDir)) # Warning: No confirmation asked
self.args.reset = True
if self.args.reset:
fileList = [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir)]
for f in fileList:
print('Removing {}'.format(f))
os.remove(f)
else:
print('No previous model found, starting from clean directory: {}'.format(self.modelDir))
def _saveSession(self, sess):
""" Save the model parameters and the variables
Args:
sess: the current session
"""
tqdm.write('Checkpoint reached: saving model (don\'t stop the run)...')
self.saveModelParams()
self.saver.save(sess, self._getModelName()) # TODO: Put a limit size (ex: 3GB for the modelDir)
tqdm.write('Model saved.')
def _getModelList(self):
""" Return the list of the model files inside the model directory
"""
return [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir) if f.endswith(self.MODEL_EXT)]
def loadModelParams(self):
""" Load the some values associated with the current model, like the current globStep value
For now, this function does not need to be called before loading the model (no parameters restored). However,
the modelDir name will be initialized here so it is required to call this function before managePreviousModel(),
_getModelName() or _getSummaryName()
Warning: if you modify this function, make sure the changes mirror saveModelParams, also check if the parameters
should be reset in managePreviousModel
"""
# Compute the current model path
self.modelDir = os.path.join(self.args.rootDir, self.MODEL_DIR_BASE)
if self.args.modelTag:
self.modelDir += '-' + self.args.modelTag
# If there is a previous model, restore some parameters
configName = os.path.join(self.modelDir, self.CONFIG_FILENAME)
if not self.args.reset and not self.args.createDataset and os.path.exists(configName):
# Loading
config = configparser.ConfigParser()
config.read(configName)
# Check the version
currentVersion = config['General'].get('version')
if currentVersion != self.CONFIG_VERSION:
raise UserWarning('Present configuration version {0} does not match {1}. You can try manual changes on \'{2}\''.format(currentVersion, self.CONFIG_VERSION, configName))
            # Restoring the parameters
self.globStep = config['General'].getint('globStep')
self.args.maxLength = config['General'].getint('maxLength') # We need to restore the model length because of the textData associated and the vocabulary size (TODO: Compatibility mode between different maxLength)
self.args.watsonMode = config['General'].getboolean('watsonMode')
self.args.autoEncode = config['General'].getboolean('autoEncode')
self.args.corpus = config['General'].get('corpus')
self.args.datasetTag = config['General'].get('datasetTag', '')
self.args.hiddenSize = config['Network'].getint('hiddenSize')
self.args.numLayers = config['Network'].getint('numLayers')
self.args.embeddingSize = config['Network'].getint('embeddingSize')
self.args.initEmbeddings = config['Network'].getboolean('initEmbeddings')
self.args.softmaxSamples = config['Network'].getint('softmaxSamples')
# No restoring for training params, batch size or other non model dependent parameters
# Show the restored params
print()
print('Warning: Restoring parameters:')
print('globStep: {}'.format(self.globStep))
print('maxLength: {}'.format(self.args.maxLength))
print('watsonMode: {}'.format(self.args.watsonMode))
print('autoEncode: {}'.format(self.args.autoEncode))
print('corpus: {}'.format(self.args.corpus))
print('datasetTag: {}'.format(self.args.datasetTag))
print('hiddenSize: {}'.format(self.args.hiddenSize))
print('numLayers: {}'.format(self.args.numLayers))
print('embeddingSize: {}'.format(self.args.embeddingSize))
print('initEmbeddings: {}'.format(self.args.initEmbeddings))
print('softmaxSamples: {}'.format(self.args.softmaxSamples))
print()
        # For now, no independent maxLength between encoder and decoder
self.args.maxLengthEnco = self.args.maxLength
self.args.maxLengthDeco = self.args.maxLength + 2
if self.args.watsonMode:
self.SENTENCES_PREFIX.reverse()
def saveModelParams(self):
""" Save the params of the model, like the current globStep value
Warning: if you modify this function, make sure the changes mirror loadModelParams
"""
config = configparser.ConfigParser()
config['General'] = {}
config['General']['version'] = self.CONFIG_VERSION
config['General']['globStep'] = str(self.globStep)
config['General']['maxLength'] = str(self.args.maxLength)
config['General']['watsonMode'] = str(self.args.watsonMode)
config['General']['autoEncode'] = str(self.args.autoEncode)
config['General']['corpus'] = str(self.args.corpus)
config['General']['datasetTag'] = str(self.args.datasetTag)
config['Network'] = {}
config['Network']['hiddenSize'] = str(self.args.hiddenSize)
config['Network']['numLayers'] = str(self.args.numLayers)
config['Network']['embeddingSize'] = str(self.args.embeddingSize)
config['Network']['initEmbeddings'] = str(self.args.initEmbeddings)
config['Network']['softmaxSamples'] = str(self.args.softmaxSamples)
# Keep track of the learning params (but without restoring them)
config['Training (won\'t be restored)'] = {}
config['Training (won\'t be restored)']['learningRate'] = str(self.args.learningRate)
config['Training (won\'t be restored)']['batchSize'] = str(self.args.batchSize)
with open(os.path.join(self.modelDir, self.CONFIG_FILENAME), 'w') as configFile:
config.write(configFile)
def _getSummaryName(self):
""" Parse the argument to decide were to save the summary, at the same place that the model
The folder could already contain logs if we restore the training, those will be merged
Return:
str: The path and name of the summary
"""
return self.modelDir
def _getModelName(self):
""" Parse the argument to decide were to save/load the model
This function is called at each checkpoint and the first time the model is load. If keepAll option is set, the
globStep value will be included in the name.
Return:
str: The path and name were the model need to be saved
"""
modelName = os.path.join(self.modelDir, self.MODEL_NAME_BASE)
if self.args.keepAll: # We do not erase the previously saved model by including the current step on the name
modelName += '-' + str(self.globStep)
return modelName + self.MODEL_EXT
def getDevice(self):
""" Parse the argument to decide on which device run the model
Return:
str: The name of the device on which run the program
"""
if self.args.device == 'cpu':
return '/cpu:0'
elif self.args.device == 'gpu':
return '/gpu:0'
elif self.args.device is None: # No specified device (default)
return None
else:
print('Warning: Error in the device name: {}, use the default device'.format(self.args.device))
return None
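# Minimal daemon-mode sketch (illustrative): assumes a model has already been
# trained and saved under save/model, and that this module is launched the
# same way as the regular run script.
if __name__ == '__main__':
    chatbot = Chatbot()
    chatbot.main(['--test', Chatbot.TestMode.DAEMON])
    print(chatbot.daemonPredict('Hello, how are you?'))
    chatbot.daemonClose()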
| apache-2.0 |
codemeow5/PyPack | pypack/protocol.py | 1 | 2657 | """ Class and function related to protocol operation
"""
import datetime
import struct
MSG_TYPE_SEND = 0x1
MSG_TYPE_ACK = 0x2
MSG_TYPE_RECEIVED = 0x3
MSG_TYPE_RELEASE = 0x4
MSG_TYPE_COMPLETED = 0x5
QOS0 = 0
QOS1 = 1
QOS2 = 2
# MAX_DATETIME = int((datetime.datetime(2500, 1, 1) - datetime.datetime(1970, 1, 1)).total_seconds())
class Packet(object):
""" This is a class that describe an incoming or outgoing message
Members:
msg_type : Enum. message type
qos : Enum. quality of service level
dup : Bool. whether the message is resent
msg_id : Number. message id
remaining_length : Number. payload length
total_length : Number. buffer length
payload : String. message body
buff : String. full message
confirm : whether the message is answered
retry_times : resent times
timestamp : next send time
"""
def __init__(self, msg_type=MSG_TYPE_SEND, qos=QOS0, dup=False, msg_id=0, payload=None):
self.msg_type = msg_type
self.qos = qos
self.dup = dup
self.msg_id = msg_id
if payload is not None and not isinstance(payload, str):
raise TypeError("parameter payload must be str, not %s" % type(payload).__name__)
self.payload = payload
if payload is None:
self.remaining_length = 0
else:
self.remaining_length = len(payload)
self.total_length = 5 + self.remaining_length
self.confirm = False
self.retry_times = 0
self.timestamp = 0
self.buff = None
@staticmethod
def encode(packet):
""" Encode packet object and fill buff field
"""
buff = bytearray()
fixed_header = (packet.msg_type << 4) | (packet.qos << 2) | (packet.dup << 1)
buff.extend(struct.pack("!B", fixed_header))
buff.extend(struct.pack("!H", packet.msg_id))
buff.extend(struct.pack("!H", packet.remaining_length))
if packet.payload is not None:
buff.extend(packet.payload)
packet.buff = str(buff)
@staticmethod
def decode(buff):
""" Convert buff string to packet object
"""
(fixed_header, msg_id, remaining_length) = struct.unpack("!BHH", buff[:5])
msg_type = fixed_header >> 4
qos = (fixed_header & 0xf) >> 2
dup = (fixed_header & 0x3) >> 1
if len(buff) >= 5 + remaining_length:
(_, payload) = struct.unpack("!5s%ss" % remaining_length, buff[:5 + remaining_length])
packet = Packet(msg_type, qos, dup, msg_id, payload)
packet.buff = buff
return packet
else:
return None
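# Round-trip sketch (illustrative): a QOS1 SEND packet serializes to a 5-byte
# header (fixed header byte, msg_id, remaining length) followed by the
# payload, and decode() parses the same bytes back.
if __name__ == "__main__":
    pkt = Packet(msg_type=MSG_TYPE_SEND, qos=QOS1, msg_id=42, payload="hello")
    Packet.encode(pkt)              # fills pkt.buff with the wire bytes
    clone = Packet.decode(pkt.buff)
    assert (clone.msg_type, clone.qos, clone.msg_id) == (MSG_TYPE_SEND, QOS1, 42)
    assert clone.payload == "hello"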
| mit |
ioanaantoche/muhaha | ioana/examples/feet.py | 1 | 1624 | import sys
from naoqi import ALProxy
import time
def main(robotIP):
PORT = 9559
try:
motionProxy = ALProxy("ALMotion", robotIP, PORT)
except Exception,e:
print "Could not create proxy to ALMotion"
print "Error was: ",e
sys.exit(1)
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
motionProxy.wbEnable(True)
# Example showing how to fix the feet.
#print "Feet fixed."
#stateName = "Fixed"
#supportLeg = "Legs"
#motionProxy.wbFootState(stateName, supportLeg)
    # Example showing how to fix the left leg and constrain the right leg to a plane.
#print "Left leg fixed, right leg in a plane."
#motionProxy.wbFootState("Fixed", "LLeg")
#motionProxy.wbFootState("Plane", "RLeg")
    # Example showing how to fix the left leg and keep the right leg free.
print "Left leg fixed, right leg free"
motionProxy.wbFootState("Fixed", "LLeg")
motionProxy.wbFootState("Free", "RLeg")
time.sleep(10.0)
print "motionProxy.wbEnable(False)"
motionProxy.wbEnable(False)
time.sleep(5.0)
print "postureProxy.goToPosture(Sit, 0.5)"
postureProxy.goToPosture("SitRelax", 0.5)
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python almotion_wbfootstate.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp) | gpl-2.0 |
seomoz/gevent-soup | bs4/element.py | 438 | 61538 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
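# For example (illustrative): encoding a stored 'content' value rewrites the
# charset declaration in place, so
# ContentMetaAttributeValue('text/html; charset=euc-jp').encode('utf8')
# returns 'text/html; charset=utf8'.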
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
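    # For example (illustrative): for soup = BeautifulSoup(u'<b>caf\xe9 & bar</b>'),
    # soup.b.encode(formatter="minimal") escapes only the ampersand
    # (caf\xe9 &amp; bar), while formatter="html" also emits &eacute;.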
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
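    # For example (illustrative): given <p><b>text</b></p>, soup.b.unwrap()
    # replaces the <b> tag with its children, leaving <p>text</p>, while
    # soup.p.wrap(soup.new_tag("div")) produces <div><p>text</p></div>.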
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
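        # Worked example (illustrative): with contents [a, b, c],
        # tag.insert(2, a) first extracts a (contents become [b, c]) and
        # shifts the target index down to 1, yielding [b, a, c].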
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
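    # Sketch of the navigation generators (hypothetical markup): they walk
    # the tree lazily, so callers can break out early without paying for a
    # full traversal.
    #
    #     soup = BeautifulSoup("<html><body><p><b>x</b></p></body></html>")
    #     [t.name for t in soup.b.parents]
    #     # -> [u'p', u'body', u'html', u'[document]']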
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
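    # Worked example of the regex above (illustrative): the selector
    # 'a[href^="http"]' yields tag='a', attribute='href', operator='^',
    # value='http', which _attribute_checker() below turns into a predicate.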
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
        space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
            value = " ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
            # string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
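    # Sketch of the resulting predicates (hypothetical element `el`):
    #
    #     self._attribute_checker('~', 'class', 'nav')(el)
    #     # True when 'nav' is one of el's class tokens, i.e. [class~="nav"]
    #     self._attribute_checker('|', 'lang', 'en')(el)
    #     # True for lang="en" or lang="en-GB", i.e. [lang|="en"]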
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
    SUFFIX = u'>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
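# Worked example (values chosen for illustration): for_name_and_ids builds
# the text that PREFIX/SUFFIX wrap, so
#
#     Doctype.for_name_and_ids('html', '-//W3C//DTD XHTML 1.0 Strict//EN',
#                              'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd')
#
# renders as
#
#     <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">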
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
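    # Sketch of get_text() (hypothetical markup): separator and strip shape
    # the concatenation of descendant strings.
    #
    #     soup = BeautifulSoup("<p> one <b> two </b></p>")
    #     soup.p.get_text()                 # u' one  two '
    #     soup.p.get_text("|", strip=True)  # u'one|two'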
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag == "contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
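    # Sketch of the matcher kinds the docstring above describes
    # (hypothetical markup; `re` is already imported by this module):
    #
    #     soup = BeautifulSoup('<a href="x">1</a><b class="k">2</b>')
    #     soup.find_all('a')                               # tag name
    #     soup.find_all(re.compile('^b$'))                 # regular expression
    #     soup.find_all(lambda tag: tag.has_attr('href'))  # callable
    #     soup.find_all(attrs={'class': 'k'})              # attribute value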
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
                        except ValueError:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
                    if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
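    # Sketch of the selector grammar select() accepts (hypothetical markup):
    #
    #     soup.select("div p")            # descendant
    #     soup.select("div > p")          # direct child (combinator)
    #     soup.select('a[href^="http"]')  # attribute prefix match
    #     soup.select("p:nth-of-type(2)") # the only pseudo-class implemented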
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
        was different from __contains__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
| mit |
BreakawayConsulting/pyxmlerrors | pyxmlerrors.py | 1 | 3091 | """
Copyright (c) 2013 Breakaway Consulting Pty. Ltd.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.dom.minidom
import xml.dom.expatbuilder
from xml.parsers.expat import ExpatError
def monkey_start_element_handler(self, name, attributes):
    """This function is monkey-patched over the standard start_element_handler method.
It adds the _line and _col attributes to the element node so that later error-checking can produce useful,
targeted error messages.
"""
real_start_element_handler(self, name, attributes)
node = self.curNode
node._line = self.getParser().CurrentLineNumber
node._col = self.getParser().CurrentColumnNumber
real_start_element_handler = xml.dom.expatbuilder.ExpatBuilderNS.start_element_handler
xml.dom.expatbuilder.ExpatBuilderNS.start_element_handler = monkey_start_element_handler
def xml_error_str(el, msg):
"""Return an error string in the form:
filename:lineno.colno msg
"""
return "{}:{}.{} {}".format(el.ownerDocument._path, el.ownerDocument._start_line + el._line, el._col, msg)
def xml_parse_file(filename):
"""Parse XML file `filename` and return the documentElement.
    This is a thin wrapper around the underlying standard file parsing routine that adds extra attributes to the
DOM to enable better diagnostics via the xml_error_str function.
"""
try:
dom = xml.dom.minidom.parse(filename)
except ExpatError as e:
e._path = filename
raise e
dom._path = filename
dom._start_line = 0
return dom
def xml_parse_string(string, name='<string>', start_line=0):
"""Parse an XML string.
    Optionally a name can be provided that will be used when providing diagnostics.
In the case where the string has been extracted from another file the start_line parameter can be used to adjust
the line number diagnostics.
"""
try:
dom = xml.dom.minidom.parseString(string)
except ExpatError as e:
e._path = name
e.lineno += start_line
raise e
dom._path = name
dom._start_line = start_line
return dom
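# A minimal usage sketch (hypothetical file and element names): combining the
# parse helpers with xml_error_str to report a semantic problem at the exact
# source location.
#
#     dom = xml_parse_file('config.xml')
#     for el in dom.documentElement.getElementsByTagName('module'):
#         if not el.hasAttribute('name'):
#             raise SystemExit(xml_error_str(el, "module requires a 'name' attribute"))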
| mit |
blakfeld/ansible | v1/ansible/runner/action_plugins/synchronize.py | 86 | 8449 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os.path
from ansible import utils
from ansible import constants
from ansible.runner.return_data import ReturnData
import ansible.utils.template as template
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
self.inject = None
def _get_absolute_path(self, path=None):
if 'vars' in self.inject:
if '_original_file' in self.inject['vars']:
# roles
original_path = path
path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir)
if original_path and original_path[-1] == '/' and path[-1] != '/':
# make sure the dwim'd path ends in a trailing "/"
# if the original path did
path += '/'
return path
def _process_origin(self, host, path, user):
if not host in ['127.0.0.1', 'localhost']:
if user:
return '%s@%s:%s' % (user, host, path)
else:
return '%s:%s' % (host, path)
else:
if not ':' in path:
if not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _process_remote(self, host, path, user):
transport = self.runner.transport
return_data = None
if not host in ['127.0.0.1', 'localhost'] or transport != "local":
if user:
return_data = '%s@%s:%s' % (user, host, path)
else:
return_data = '%s:%s' % (host, path)
else:
return_data = path
if not ':' in return_data:
if not return_data.startswith('/'):
return_data = self._get_absolute_path(path=return_data)
return return_data
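    # Sketch of the path munging above (hypothetical values): a remote side
    # becomes 'user@host:path', while a local side stays a bare path that
    # _get_absolute_path() resolves against the role's files/ directory.
    #
    #     self._process_remote('web01', '/srv/app/', 'deploy')
    #     # -> 'deploy@web01:/srv/app/'
    #     self._process_origin('localhost', 'files/app/', None)
    #     # -> absolute local path, keeping the trailing '/'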
def setup(self, module_name, inject):
        ''' Always default to localhost as the delegate if none is defined '''
self.inject = inject
# Store original transport and sudo values.
self.original_transport = inject.get('ansible_connection', self.runner.transport)
self.original_become = self.runner.become
self.transport_overridden = False
if inject.get('delegate_to') is None:
inject['delegate_to'] = '127.0.0.1'
        # If the original transport is not local, override the transport and disable sudo.
if self.original_transport != 'local':
inject['ansible_connection'] = 'local'
self.transport_overridden = True
self.runner.become = False
def run(self, conn, tmp, module_name, module_args,
inject, complex_args=None, **kwargs):
''' generates params and passes them on to the rsync module '''
self.inject = inject
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
src = options.get('src', None)
dest = options.get('dest', None)
use_ssh_args = options.pop('use_ssh_args', None)
src = template.template(self.runner.basedir, src, inject)
dest = template.template(self.runner.basedir, dest, inject)
use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
try:
options['local_rsync_path'] = inject['ansible_rsync_path']
except KeyError:
pass
# from the perspective of the rsync call the delegate is the localhost
src_host = '127.0.0.1'
dest_host = inject.get('ansible_ssh_host', inject['inventory_hostname'])
# allow ansible_ssh_host to be templated
dest_host = template.template(self.runner.basedir, dest_host, inject, fail_on_undefined=True)
dest_is_local = dest_host in ['127.0.0.1', 'localhost']
# CHECK FOR NON-DEFAULT SSH PORT
dest_port = options.get('dest_port')
inv_port = inject.get('ansible_ssh_port', inject['inventory_hostname'])
if inv_port != dest_port and inv_port != inject['inventory_hostname']:
options['dest_port'] = inv_port
# edge case: explicit delegate and dest_host are the same
if dest_host == inject['delegate_to']:
dest_host = '127.0.0.1'
# SWITCH SRC AND DEST PER MODE
if options.get('mode', 'push') == 'pull':
(dest_host, src_host) = (src_host, dest_host)
# CHECK DELEGATE HOST INFO
use_delegate = False
if conn.delegate != conn.host:
if 'hostvars' in inject:
if conn.delegate in inject['hostvars'] and self.original_transport != 'local':
# use a delegate host instead of localhost
use_delegate = True
# COMPARE DELEGATE, HOST AND TRANSPORT
process_args = False
if not dest_host is src_host and self.original_transport != 'local':
# interpret and inject remote host info into src or dest
process_args = True
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
if process_args or use_delegate:
user = None
if utils.boolean(options.get('set_remote_user', 'yes')):
if use_delegate:
user = inject['hostvars'][conn.delegate].get('ansible_ssh_user')
if not use_delegate or not user:
user = inject.get('ansible_ssh_user',
self.runner.remote_user)
if use_delegate:
# FIXME
private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
else:
private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
private_key = template.template(self.runner.basedir, private_key, inject, fail_on_undefined=True)
if not private_key is None:
private_key = os.path.expanduser(private_key)
options['private_key'] = private_key
# use the mode to define src and dest's url
if options.get('mode', 'push') == 'pull':
# src is a remote path: <user>@<host>, dest is a local path
src = self._process_remote(src_host, src, user)
dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(dest_host, dest, user)
options['src'] = src
options['dest'] = dest
if 'mode' in options:
del options['mode']
if use_ssh_args:
options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
# Allow custom rsync path argument.
rsync_path = options.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
if rsync_path:
options['rsync_path'] = '"' + rsync_path + '"'
module_args = ""
if self.runner.noop_on_check(inject):
module_args = "CHECKMODE=True"
# run the module and store the result
result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
# reset the sudo property
self.runner.become = self.original_become
return result
| gpl-3.0 |
LaurentClaessens/phystricks | manual/phystricksIllusionNHwEtp.py | 1 | 1130 | # -*- coding: utf8 -*-
from phystricks import *
def IllusionNHwEtp():
pspict,fig = SinglePicture("IllusionNHwEtp")
pspict.dilatation(0.7)
perspective=ObliqueProjection(45,sqrt(2)/2)
l=2
P=(0,0)
cubesP=[]
cubesL=[]
cubesH=[]
profondeur=7
longueur=4
hauteur=4
for i in range(0,profondeur):
cubesP.append(perspective.cuboid( P,l,l,l ))
Q=cubesP[-1].c2[3]
P=(Q.x,Q.y)
P=(0,0)
for i in range(0,longueur):
cubesL.append(perspective.cuboid(P,l,l,l))
Q=cubesL[-1].c1[2]
P=(Q.x,Q.y)
for i in range(0,hauteur):
cubesH.append(perspective.cuboid(P,l,l,l))
Q=cubesH[-1].c1[0]
P=(Q.x,Q.y)
    cubesP.reverse() # This way the farthest cubes are drawn first.
for i,cub in enumerate(cubesP):
cub.make_opaque()
pspict.DrawGraphs(cub)
for i,cub in enumerate(cubesL):
cub.make_opaque()
pspict.DrawGraphs(cub)
for i,cub in enumerate(cubesH):
cub.make_opaque()
pspict.DrawGraphs(cub)
fig.no_figure()
fig.conclude()
fig.write_the_file()
| gpl-3.0 |
czgu/metaHack | env/lib/python2.7/site-packages/django/core/checks/compatibility/django_1_7_0.py | 91 | 1368 | from __future__ import unicode_literals
from .. import Warning, register, Tags
@register(Tags.compatibility)
def check_1_7_compatibility(**kwargs):
errors = []
errors.extend(_check_middleware_classes(**kwargs))
return errors
def _check_middleware_classes(app_configs=None, **kwargs):
"""
Checks if the user has *not* overridden the ``MIDDLEWARE_CLASSES`` setting &
warns them about the global default changes.
"""
from django.conf import settings
# MIDDLEWARE_CLASSES is overridden by default by startproject. If users
# have removed this override then we'll warn them about the default changes.
if not settings.is_overridden('MIDDLEWARE_CLASSES'):
return [
Warning(
"MIDDLEWARE_CLASSES is not set.",
hint=("Django 1.7 changed the global defaults for the MIDDLEWARE_CLASSES. "
"django.contrib.sessions.middleware.SessionMiddleware, "
"django.contrib.auth.middleware.AuthenticationMiddleware, and "
"django.contrib.messages.middleware.MessageMiddleware were removed from the defaults. "
"If your project needs these middleware then you should configure this setting."),
obj=None,
id='1_7.W001',
)
]
else:
return []
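# A minimal sketch (hypothetical project settings) of the explicit override
# that makes settings.is_overridden('MIDDLEWARE_CLASSES') true and so skips
# the warning above -- the exact middleware list is the project's choice:
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'django.contrib.auth.middleware.AuthenticationMiddleware',
#         'django.contrib.messages.middleware.MessageMiddleware',
#     )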
| apache-2.0 |
tempbottle/gunicorn | gunicorn/app/django_wsgi.py | 87 | 4363 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
""" module used to build the django wsgi application """
from __future__ import print_function
import os
import re
import sys
import time
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from imp import reload
from django.conf import settings
from django.core.management.validation import get_validation_errors
from django.utils import translation
try:
from django.core.servers.basehttp import get_internal_wsgi_application
django14 = True
except ImportError:
from django.core.handlers.wsgi import WSGIHandler
django14 = False
from gunicorn import util
def make_wsgi_application():
# validate models
s = StringIO()
if get_validation_errors(s):
s.seek(0)
error = s.read()
msg = "One or more models did not validate:\n%s" % error
print(msg, file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
translation.activate(settings.LANGUAGE_CODE)
if django14:
return get_internal_wsgi_application()
return WSGIHandler()
def reload_django_settings():
mod = util.import_module(os.environ['DJANGO_SETTINGS_MODULE'])
# Reload module.
reload(mod)
# Reload settings.
# Use code from django.settings.Settings module.
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(settings, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in settings.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = util.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in sorted(app_subdirs):
if (name_pattern.match(d) and
os.path.isdir(os.path.join(appdir, d))):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
setattr(settings, "INSTALLED_APPS", new_installed_apps)
if hasattr(time, 'tzset') and settings.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root,
*(settings.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" %
settings.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = settings.TIME_ZONE
time.tzset()
# Settings are configured, so we can set up the logger if required
if getattr(settings, 'LOGGING_CONFIG', False):
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = settings.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = util.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
# ... then invoke it with the logging settings
logging_config_func(settings.LOGGING)
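# A minimal usage sketch (hypothetical settings module): reloading settings in
# place, after which wildcard INSTALLED_APPS entries such as 'myproj.apps.*'
# have been expanded to one entry per subpackage.
#
#     os.environ['DJANGO_SETTINGS_MODULE'] = 'myproj.settings'
#     reload_django_settings()
#     settings.INSTALLED_APPS  # wildcards now expanded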
def make_command_wsgi_application(admin_mediapath):
reload_django_settings()
try:
from django.core.servers.basehttp import AdminMediaHandler
return AdminMediaHandler(make_wsgi_application(), admin_mediapath)
except ImportError:
return make_wsgi_application()
| mit |
nttdata-osscloud/ceilometer | ceilometer/tests/api/v1/test_list_sources_scenarios.py | 2 | 1184 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 Julien Danjou
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing sources.
"""
from ceilometer.tests import api as tests_api
from ceilometer.tests import db as tests_db
class TestListSource(tests_api.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def test_source(self):
ydata = self.get('/sources/test_source')
self.assertIn("somekey", ydata)
self.assertEqual(666, ydata["somekey"])
def test_unknownsource(self):
ydata = self.get('/sources/test_source_that_does_not_exist')
self.assertEqual({}, ydata)
| apache-2.0 |
thilbern/scikit-learn | sklearn/neighbors/base.py | 7 | 25049 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
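        # Illustrative values: dist == [[1., 2., 4.]] comes back as
        # [[1., 0.5, 0.25]]; a zero distance becomes inf here, which
        # callers treat as an exact match.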
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
            raise ValueError("p must be at least 1 for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
                raise ValueError("p must be at least 1 "
                                 "for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
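# Editorial usage sketch (assumes the public NearestNeighbors API of this
# module; not part of the original source). With the defaults above, a dense
# array and the minkowski metric select 'kd_tree' whenever n_neighbors is
# small relative to n_samples:
#   >>> import numpy as np
#   >>> from sklearn.neighbors import NearestNeighbors
#   >>> X = np.random.RandomState(0).rand(100, 3)
#   >>> nn = NearestNeighbors(n_neighbors=5, algorithm='auto').fit(X)
#   >>> nn._fit_method
#   'kd_tree'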
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
        Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
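    # Editorial worked example of the CSR layout above (not part of the
    # original source): with n_samples1 = 3 queries and n_neighbors = 2,
    # n_nonzero = 6 and A_indptr = [0, 2, 4, 6], so row i of the graph owns
    # A_ind[2*i:2*i+2] and A_data[2*i:2*i+2] (all ones in 'connectivity'
    # mode).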
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
            # if each point has the same number of neighbors, we can use
            # a normal array; otherwise, we return an object array whose
            # elements are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
        if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
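    # Editorial sketch of the label encoding above (illustrative values,
    # not part of the original source):
    #   >>> import numpy as np
    #   >>> np.unique(['b', 'a', 'b'], return_inverse=True)
    #   (array(['a', 'b'], dtype='|S1'), array([1, 0, 1]))
    # classes_ keeps the sorted labels and _y their integer codes.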
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
Baekalfen/Helmholtz-Coil-Simulator | main_grid_treaded.py | 1 | 7586 | # -*- coding: utf-8 -*-
##from multiprocessing import Process
##from multiprocessing import Pool
from visual import *
from math import *
from sys import platform
import time
#######################################
# #
# Author: Mads Ynddal #
# All Rights Reserved 2012 #
# #
#######################################
#######################################
#
# Settings - Start
#
#Window setup:
Width=800 #Width of window
Height=750 #Height of window
### Technical specifications
FPS=60
cpu_threads=2 #Windows can only handle 1 thread
defi=1. #points per dm
grid_sizex=30
grid_sizey=1
grid_sizez=30
OP_ref_spacing=2
coil_1=1
coil_2=1
max_blen=20
hue_multiplier=2./6
strength_as_color=1
vector_pointers=1
debug_vectors3=0
debug_vectors3_multiplier=10**6
debug_offset=0
###
### Physics and math
Vp=4*pi*10**-7
I=1
constant=Vp*I/(4*pi)
coiloffset=-15./2 #Half the distance between the coils
dm=15*8*2*pi #Domain of the function (rounded up)
###
#
# Settings - End
#
#######################################
#######################################
#
# INITIALIZING - Start
#
scene = display(title='Helmholtz Coil',width=Width, height=Height,autoscale = False,scale=(0.03,0.03,0.03))
vectors_threaded=[]
coil1=curve(pos=[])
coil2=curve(pos=[])
vectors=[]
col=0
P=[]
if platform=="win32":
print "WARNING! Windows can't run multiple threads!\nForcing cpu_threads to 1"
cpu_threads=1
if cpu_threads>1:
from multiprocessing import Process
from multiprocessing import Pool
#
# INITIALIZING - End
#
#######################################
#######################################
# Draw the coils from the parametric function
for tt in range(int(dm*defi)+1):
t=tt/defi
x=t*0.0005+15./2+debug_offset
y=31.5/2*sin(t)
z=31.5/2*cos(t)
if coil_1==1:
coil1.append((x,y,z))
else:
coil1.append((0,0,0))
x=t*0.0005-15./2-debug_offset
y=31.5/2*sin(t)
z=31.5/2*cos(t)
if coil_2:
coil2.append((x,y,z))
else:
coil2.append((0,0,0))
#
#######################################
#######################################
# Vector arithmetic helpers
def vlen(a):
return sqrt(a[0]**2+a[1]**2+a[2]**2)
#Vector length
def vsub(a,b):
return [a[0]-b[0],a[1]-b[1],a[2]-b[2]]
#Substract vectors a,b
def vadd(a,b):
return [a[0]+b[0],a[1]+b[1],a[2]+b[2]]
#Add vectors a,b
def vdiv(a,b):
return [a[0]/float(b),a[1]/float(b),a[2]/float(b)]
#Divide vector by scalar b
def cprod(a,b):
return [a[1]*b[2]-a[2]*b[1],
a[2]*b[0]-a[0]*b[2],
a[0]*b[1]-a[1]*b[0]]
#Cross product
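# Editorial sanity check (not part of the original source):
#   cprod([1,0,0], [0,1,0]) == [0,0,1]
# the right-handed cross product of the x and y unit vectors.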
#
#######################################
#######################################
# Biot-Savart's law
def dlxrr3(dl,r):
return vdiv(cprod(dl,r),vlen(r)**3)
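# Editorial note (not part of the original source): dlxrr3 is the geometric
# part of the Biot-Savart law,
#   dB = (mu0*I / (4*pi)) * (dl x r) / |r|**3
# the mu0*I/(4*pi) prefactor ('constant' in the settings above) is applied
# once per field point in Apply_constant() below.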
def Apply_constant(Bsum1,Bsum2):
Bsum=vdiv(vadd(Bsum1,Bsum2),1/constant)
#Bsum=vdiv(vsub(Bsum1,Bsum2),1/constant)
Bsum1=vdiv(Bsum1,1/constant)
Bsum2=vdiv(Bsum2,1/constant)
return Bsum
def inte(OP):
global coiloffset,col
Bsum1=[0,0,0]
Bsum2=[0,0,0]
    #First coil
coiloffset*=-1
for tt in range(int(dm*defi)):
t=tt/float(defi)
s1,s2=s1s2(t)
dl=vsub(s2,s1)
m=vdiv(vadd(s1,s2),2)
r=vsub(OP,m)
Bsum1=vadd(Bsum1,dlxrr3(dl,r))
if not coil_1:
Bsum1=[0,0,0]
    #Second coil
coiloffset*=-1
for tt in range(int(dm*defi)):
t=tt/float(defi)
s1,s2=s1s2(t)
dl=vsub(s2,s1)
m=vdiv(vadd(s1,s2),2)
r=vsub(OP,m)
Bsum2=vadd(Bsum2,dlxrr3(dl,r))
if not coil_2:
Bsum2=[0,0,0]
return Bsum1,Bsum2
#
#######################################
#######################################
# Evaluate the coil function at time 't'
def s1s2(t1):
s1=[t1*0.0005+coiloffset,31.5/2*sin(t1),31.5/2*cos(t1)]
t2=t1+1/float(defi)
s2=[t2*0.0005+coiloffset,31.5/2*sin(t2),31.5/2*cos(t2)]
return s1,s2
#
#######################################
#######################################
# Compute vector strength and direction
def cal_vectors(xx,yy,zz):
global vectors_threaded
P=[xx*OP_ref_spacing-((grid_sizex-1)*OP_ref_spacing)/2,yy*OP_ref_spacing-((grid_sizey-1)*OP_ref_spacing)/2,zz*OP_ref_spacing-((grid_sizez-1)*OP_ref_spacing)/2]
n=xx+yy+zz
    Bsum=vdiv(Apply_constant(*inte(P)),1./debug_vectors3_multiplier)
Blen=vlen(Bsum)
return (P,Bsum,Blen)
#
#######################################
#######################################
# Distribute tasks to CPU cores
if cpu_threads>1:
pool = Pool(processes=cpu_threads)# start 4 worker processes
result=[]
P=[]
Bsum=[]
time_stamp=time.time()
for xx in range(grid_sizex):
for yy in range(grid_sizey):
for zz in range(grid_sizez):
if cpu_threads>1:
result.append(pool.apply_async(cal_vectors, [xx,yy,zz]))
else:
vectors_threaded.append(cal_vectors(xx,yy,zz))
### Collect results from CPU cores
if cpu_threads>1:
for n in range(grid_sizex*grid_sizey*grid_sizez):
vectors_threaded.append(result[n].get())
#
#######################################
#######################################
# Render the vector field
for n in range(len(vectors_threaded)):
P,Bsum,Blen=vectors_threaded[n]
if strength_as_color==1:
Blen=vlen(Bsum)
vcolor=color.hsv_to_rgb((1./4-(Blen*1./(max_blen/2)*hue_multiplier),1,1))
Bsum=vdiv(Bsum,Blen)
else:
vcolor=color.red
if Blen<max_blen:
curve(color=vcolor,pos=[(P[0],P[1],P[2]),(P[0]+Bsum[0],P[1]+Bsum[1],P[2]+Bsum[2])])
if vector_pointers==1:
sphere(pos=(P[0]+Bsum[0],P[1]+Bsum[1],P[2]+Bsum[2]), radius=0.1, color=color.white, opacity=1)
#
#######################################
print "Processing lasted: "+str(time.time()-time_stamp)[0:5],"sec\nUtilizing",cpu_threads,"processor threads, to animate:",grid_sizez*grid_sizey*grid_sizex,"vectors"
#######################################
# Insert particle and
# set up the camera
center_point = sphere (pos=(0,0,0), radius=1, color=color.red, opacity=0.5)
particle = sphere (pos=(0,0,-12), radius=1, color=color.green, opacity=0.4)
speed = label()
i=0.
auto_rotate=1
while(1):
rate(FPS)
if auto_rotate==1:
i+=1
scene.forward=(-1*sin(i/FPS/5),-1,-1*cos(i/FPS/5))
#Particle
    Bsum=vdiv(Apply_constant(*inte(particle.pos)),1./debug_vectors3_multiplier)
particle.pos.x+=Bsum[0]/30.
particle.pos.y+=Bsum[1]/30.
particle.pos.z+=Bsum[2]/30.
speed.pos=particle.pos
speed.pos.x+=4
speed.text=str(vlen(Bsum))[0:3]
#Particle
    ### User input
if scene.kb.keys: # is there an event waiting to be processed?
c = scene.kb.getkey() # obtain keyboard information
if c=="r":
particle.pos=(20,0,-12)
if c=="t":
particle.pos=(0,0,-12)
if c=="y":
particle.pos=(5,0,-13)
if c=="u":
particle.pos=(14,0,-15)
if c=="w":
auto_rotate=0
scene.forward=(0,-1,0)
if c=="s":
auto_rotate=0
scene.forward=(-1,0,0)
if c=="a":
auto_rotate=0
scene.forward=(0,0,-1)
if c=="d":
if auto_rotate==0:
auto_rotate=1
else:
auto_rotate=0
#
####################################### | gpl-2.0 |
gemini-testing/selenium | py/selenium/webdriver/opera/webdriver.py | 5 | 3372 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
"""Controls the new OperaDriver and allows you
to drive the Opera browser based on Chromium."""
def __init__(self, executable_path=None, port=0,
opera_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the operadriver.
Starts the service and then creates new instance of operadriver.
:Args:
- executable_path - path to the executable. If the default is used
it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0,
a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPrefs".
         - opera_options: this takes an instance of Options
"""
executable_path = (executable_path if executable_path is not None
else "operadriver")
ChromiumDriver.__init__(self,
executable_path=executable_path,
port=port,
chrome_options=opera_options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
def create_options(self):
return Options()
class WebDriver(OperaDriver):
class ServiceType:
CHROMIUM = 2
def __init__(self,
desired_capabilities=None,
executable_path=None,
port=0,
service_log_path=None,
service_args=None,
opera_options=None):
OperaDriver.__init__(self, executable_path=executable_path,
port=port, opera_options=opera_options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
| apache-2.0 |
kpurusho/mbed | workspace_tools/host_tests/host_tests_plugins/host_test_plugins.py | 92 | 4881 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plug-ins used with host tests.
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capabilitity, *args, **kwargs):
""" Executes capability by name.
Each capability e.g. may directly just call some command line
program or execute building pythonic function
"""
return False
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
""" Function prints error in console and exits always with False
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
""" Function prints notification in console and exits always with True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
""" Checks if destination_disk is ready and can be accessed by e.g. copy commands
@init_delay - Initial delay time before first access check
@loop_delay - pooling delay for access check
"""
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
while not access(destination_disk, F_OK):
sleep(loop_delay)
self.print_plugin_char('.')
def check_parameters(self, capabilitity, *args, **kwargs):
""" This function should be ran each time we call execute()
to check if none of the required parameters is missing.
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters) > 0:
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(parameter)))
return False
return True
def run_command(self, cmd, shell=True):
""" Runs command from command line.
"""
result = True
ret = 0
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
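    # Editorial sketch of a concrete plug-in (hypothetical names, not part
    # of the mbed SDK); note that check_parameters() expects subclasses to
    # define 'required_parameters':
    #
    # class HostTestPluginResetMethod_Example(HostTestPluginBase):
    #     name = 'HostTestPluginResetMethod_Example'
    #     type = 'ResetMethod'
    #     capabilities = ['example_reset']
    #     required_parameters = ['serial']
    #     stable = True
    #
    #     def execute(self, capabilitity, *args, **kwargs):
    #         if not self.check_parameters(capabilitity, *args, **kwargs):
    #             return False
    #         return self.run_command('echo resetting %s' % kwargs['serial'])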
| apache-2.0 |
kmonsoor/python-for-android | python-modules/twisted/twisted/words/im/ircsupport.py | 49 | 9263 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
IRC support for Instance Messenger.
"""
import string
from twisted.words.protocols import irc
from twisted.words.im.locals import ONLINE
from twisted.internet import defer, reactor, protocol
from twisted.internet.defer import succeed
from twisted.words.im import basesupport, interfaces, locals
from zope.interface import implements
class IRCPerson(basesupport.AbstractPerson):
def imperson_whois(self):
if self.account.client is None:
raise locals.OfflineError
self.account.client.sendLine("WHOIS %s" % self.name)
### interface impl
def isOnline(self):
return ONLINE
def getStatus(self):
return ONLINE
def setStatus(self,status):
self.status=status
self.chat.getContactsList().setContactStatus(self)
def sendMessage(self, text, meta=None):
if self.account.client is None:
raise locals.OfflineError
for line in string.split(text, '\n'):
if meta and meta.get("style", None) == "emote":
self.account.client.ctcpMakeQuery(self.name,[('ACTION', line)])
else:
self.account.client.msg(self.name, line)
return succeed(text)
class IRCGroup(basesupport.AbstractGroup):
implements(interfaces.IGroup)
def imgroup_testAction(self):
pass
def imtarget_kick(self, target):
if self.account.client is None:
raise locals.OfflineError
reason = "for great justice!"
self.account.client.sendLine("KICK #%s %s :%s" % (
self.name, target.name, reason))
### Interface Implementation
def setTopic(self, topic):
if self.account.client is None:
raise locals.OfflineError
self.account.client.topic(self.name, topic)
def sendGroupMessage(self, text, meta={}):
if self.account.client is None:
raise locals.OfflineError
if meta and meta.get("style", None) == "emote":
self.account.client.me(self.name,text)
return succeed(text)
#standard shmandard, clients don't support plain escaped newlines!
for line in string.split(text, '\n'):
self.account.client.say(self.name, line)
return succeed(text)
def leave(self):
if self.account.client is None:
raise locals.OfflineError
self.account.client.leave(self.name)
self.account.client.getGroupConversation(self.name,1)
class IRCProto(basesupport.AbstractClientMixin, irc.IRCClient):
def __init__(self, account, chatui, logonDeferred=None):
basesupport.AbstractClientMixin.__init__(self, account, chatui,
logonDeferred)
self._namreplies={}
self._ingroups={}
self._groups={}
self._topics={}
def getGroupConversation(self, name, hide=0):
name=string.lower(name)
return self.chat.getGroupConversation(self.chat.getGroup(name, self),
stayHidden=hide)
def getPerson(self,name):
return self.chat.getPerson(name, self)
def connectionMade(self):
# XXX: Why do I duplicate code in IRCClient.register?
try:
if self.account.password:
self.sendLine("PASS :%s" % self.account.password)
self.setNick(self.account.username)
self.sendLine("USER %s foo bar :Twisted-IM user" % (
self.account.username,))
for channel in self.account.channels:
self.joinGroup(channel)
self.account._isOnline=1
if self._logonDeferred is not None:
self._logonDeferred.callback(self)
self.chat.getContactsList()
except:
import traceback
traceback.print_exc()
def setNick(self,nick):
self.name=nick
self.accountName="%s (IRC)"%nick
irc.IRCClient.setNick(self,nick)
def kickedFrom(self, channel, kicker, message):
"""
Called when I am kicked from a channel.
"""
return self.chat.getGroupConversation(
self.chat.getGroup(channel[1:], self), 1)
def userKicked(self, kickee, channel, kicker, message):
pass
def noticed(self, username, channel, message):
self.privmsg(username, channel, message, {"dontAutoRespond": 1})
def privmsg(self, username, channel, message, metadata=None):
if metadata is None:
metadata = {}
username=string.split(username,'!',1)[0]
if username==self.name: return
if channel[0]=='#':
group=channel[1:]
self.getGroupConversation(group).showGroupMessage(username, message, metadata)
return
self.chat.getConversation(self.getPerson(username)).showMessage(message, metadata)
def action(self,username,channel,emote):
username=string.split(username,'!',1)[0]
if username==self.name: return
meta={'style':'emote'}
if channel[0]=='#':
group=channel[1:]
self.getGroupConversation(group).showGroupMessage(username, emote, meta)
return
self.chat.getConversation(self.getPerson(username)).showMessage(emote,meta)
def irc_RPL_NAMREPLY(self,prefix,params):
"""
RPL_NAMREPLY
>> NAMES #bnl
<< :Arlington.VA.US.Undernet.Org 353 z3p = #bnl :pSwede Dan-- SkOyg AG
"""
group=string.lower(params[2][1:])
users=string.split(params[3])
for ui in range(len(users)):
while users[ui][0] in ["@","+"]: # channel modes
users[ui]=users[ui][1:]
if not self._namreplies.has_key(group):
self._namreplies[group]=[]
self._namreplies[group].extend(users)
for nickname in users:
try:
self._ingroups[nickname].append(group)
except:
self._ingroups[nickname]=[group]
def irc_RPL_ENDOFNAMES(self,prefix,params):
group=params[1][1:]
self.getGroupConversation(group).setGroupMembers(self._namreplies[string.lower(group)])
del self._namreplies[string.lower(group)]
def irc_RPL_TOPIC(self,prefix,params):
self._topics[params[1][1:]]=params[2]
def irc_333(self,prefix,params):
group=params[1][1:]
self.getGroupConversation(group).setTopic(self._topics[group],params[2])
del self._topics[group]
def irc_TOPIC(self,prefix,params):
nickname = string.split(prefix,"!")[0]
group = params[0][1:]
topic = params[1]
self.getGroupConversation(group).setTopic(topic,nickname)
def irc_JOIN(self,prefix,params):
nickname=string.split(prefix,"!")[0]
group=string.lower(params[0][1:])
if nickname!=self.nickname:
try:
self._ingroups[nickname].append(group)
except:
self._ingroups[nickname]=[group]
self.getGroupConversation(group).memberJoined(nickname)
def irc_PART(self,prefix,params):
nickname=string.split(prefix,"!")[0]
group=string.lower(params[0][1:])
if nickname!=self.nickname:
if group in self._ingroups[nickname]:
self._ingroups[nickname].remove(group)
self.getGroupConversation(group).memberLeft(nickname)
def irc_QUIT(self,prefix,params):
nickname=string.split(prefix,"!")[0]
if self._ingroups.has_key(nickname):
for group in self._ingroups[nickname]:
self.getGroupConversation(group).memberLeft(nickname)
self._ingroups[nickname]=[]
def irc_NICK(self, prefix, params):
fromNick = string.split(prefix, "!")[0]
toNick = params[0]
if not self._ingroups.has_key(fromNick):
return
for group in self._ingroups[fromNick]:
self.getGroupConversation(group).memberChangedNick(fromNick, toNick)
self._ingroups[toNick] = self._ingroups[fromNick]
del self._ingroups[fromNick]
def irc_unknown(self, prefix, command, params):
pass
# GTKIM calls
def joinGroup(self,name):
self.join(name)
self.getGroupConversation(name)
class IRCAccount(basesupport.AbstractAccount):
implements(interfaces.IAccount)
gatewayType = "IRC"
_groupFactory = IRCGroup
_personFactory = IRCPerson
def __init__(self, accountName, autoLogin, username, password, host, port,
channels=''):
basesupport.AbstractAccount.__init__(self, accountName, autoLogin,
username, password, host, port)
self.channels = map(string.strip,string.split(channels,','))
if self.channels == ['']:
self.channels = []
def _startLogOn(self, chatui):
logonDeferred = defer.Deferred()
cc = protocol.ClientCreator(reactor, IRCProto, self, chatui,
logonDeferred)
d = cc.connectTCP(self.host, self.port)
d.addErrback(logonDeferred.errback)
return logonDeferred
| apache-2.0 |
karlbright/beets | beets/mediafile.py | 1 | 36961 | # This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
"""
import mutagen
import mutagen.mp3
import mutagen.oggvorbis
import mutagen.mp4
import mutagen.flac
import mutagen.monkeysaudio
import datetime
import re
import base64
import imghdr
import os
import logging
import traceback
from beets.util.enumeration import enum
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
# Logger.
log = logging.getLogger('beets')
# Exceptions.
# Raised for any file MediaFile can't read.
class UnreadableFileError(IOError):
pass
# Raised for files that don't seem to have a type MediaFile supports.
class FileTypeError(UnreadableFileError):
pass
# Constants.
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'mp4': 'AAC',
'ogg': 'OGG',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
}
# Utility.
def _safe_cast(out_type, val):
"""Tries to covert val to out_type but will never raise an
exception. If the value can't be converted, then a sensible
default value is returned. out_type should be bool, int, or
unicode; otherwise, the value is just passed through.
"""
if out_type == int:
if val is None:
return 0
elif isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, basestring):
val = unicode(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
if val is None:
return False
else:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == unicode:
if val is None:
return u''
else:
if isinstance(val, str):
return val.decode('utf8', 'ignore')
elif isinstance(val, unicode):
return val
else:
return unicode(val)
elif out_type == float:
if val is None:
return 0.0
elif isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if not isinstance(val, basestring):
val = unicode(val)
val = re.match(r'[\+-]?[0-9\.]*', val.strip()).group(0)
if not val:
return 0.0
else:
return float(val)
else:
return val
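# Editorial examples of the coercion rules above (hypothetical doctest, not
# part of the original source):
#   >>> _safe_cast(int, '12/14')        # leading digits only
#   12
#   >>> _safe_cast(bool, 'not a number')
#   False
#   >>> _safe_cast(unicode, 42)
#   u'42'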
# Flags for encoding field behavior.
# Determine style of packing, if any.
packing = enum('SLASHED', # pair delimited by /
'TUPLE', # a python tuple of 2 items
'DATE', # YYYY-MM-DD
name='packing')
class StorageStyle(object):
"""Parameterizes the storage behavior of a single field for a
certain tag format.
- key: The Mutagen key used to access the field's data.
- list_elem: Store item as a single object or as first element
of a list.
- as_type: Which type the value is stored as (unicode, int,
bool, or str).
- packing: If this value is packed in a multiple-value storage
unit, which type of packing (in the packing enum). Otherwise,
None. (Makes as_type irrelevant).
- pack_pos: If the value is packed, in which position it is
stored.
- ID3 storage only: match against this 'desc' field as well
as the key.
"""
def __init__(self, key, list_elem = True, as_type = unicode,
packing = None, pack_pos = 0, id3_desc = None,
id3_frame_field = 'text'):
self.key = key
self.list_elem = list_elem
self.as_type = as_type
self.packing = packing
self.pack_pos = pack_pos
self.id3_desc = id3_desc
self.id3_frame_field = id3_frame_field
# Dealing with packings.
class Packed(object):
"""Makes a packed list of values subscriptable. To access the packed
output after making changes, use packed_thing.items.
"""
def __init__(self, items, packstyle, none_val=0, out_type=int):
"""Create a Packed object for subscripting the packed values in
items. The items are packed using packstyle, which is a value
from the packing enum. none_val is returned from a request when
        no suitable value is found in the items. Values are converted to
out_type before they are returned.
"""
self.items = items
self.packstyle = packstyle
self.none_val = none_val
self.out_type = out_type
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError('index must be an integer')
if self.items is None:
return self.none_val
items = self.items
if self.packstyle == packing.DATE:
# Remove time information from dates. Usually delimited by
# a "T" or a space.
items = re.sub(r'[Tt ].*$', '', unicode(items))
# transform from a string packing into a list we can index into
if self.packstyle == packing.SLASHED:
seq = unicode(items).split('/')
elif self.packstyle == packing.DATE:
seq = unicode(items).split('-')
elif self.packstyle == packing.TUPLE:
seq = items # tuple: items is already indexable
try:
out = seq[index]
except:
out = None
if out is None or out == self.none_val or out == '':
return _safe_cast(self.out_type, self.none_val)
else:
return _safe_cast(self.out_type, out)
def __setitem__(self, index, value):
if self.packstyle in (packing.SLASHED, packing.TUPLE):
# SLASHED and TUPLE are always two-item packings
length = 2
else:
# DATE can have up to three fields
length = 3
# make a list of the items we'll pack
new_items = []
for i in range(length):
if i == index:
next_item = value
else:
next_item = self[i]
new_items.append(next_item)
if self.packstyle == packing.DATE:
# Truncate the items wherever we reach an invalid (none)
# entry. This prevents dates like 2008-00-05.
for i, item in enumerate(new_items):
if item == self.none_val or item is None:
del(new_items[i:]) # truncate
break
if self.packstyle == packing.SLASHED:
self.items = '/'.join(map(unicode, new_items))
elif self.packstyle == packing.DATE:
field_lengths = [4, 2, 2] # YYYY-MM-DD
elems = []
for i, item in enumerate(new_items):
elems.append( ('%0' + str(field_lengths[i]) + 'i') % item )
self.items = '-'.join(elems)
elif self.packstyle == packing.TUPLE:
self.items = new_items
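# Editorial usage sketch of Packed (not part of the original source):
#   >>> p = Packed(u'3/12', packing.SLASHED)
#   >>> p[0], p[1]
#   (3, 12)
#   >>> p[1] = 14
#   >>> p.items
#   u'3/14'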
# The field itself.
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field. out_type is the type that users of MediaFile should see and
can be unicode, int, or bool. id3, mp4, and flac are StorageStyle
instances parameterizing the field's storage for each type.
"""
def __init__(self, out_type = unicode, **kwargs):
"""Creates a new MediaField.
- out_type: The field's semantic (exterior) type.
- kwargs: A hash whose keys are 'mp3', 'mp4', and 'etc'
and whose values are StorageStyle instances
parameterizing the field's storage for each type.
"""
self.out_type = out_type
if not set(['mp3', 'mp4', 'etc']) == set(kwargs):
raise TypeError('MediaField constructor must have keyword '
'arguments mp3, mp4, and etc')
self.styles = kwargs
def _fetchdata(self, obj, style):
"""Get the value associated with this descriptor's field stored
with the given StorageStyle. Unwraps from a list if necessary.
"""
# fetch the value, which may be a scalar or a list
if obj.type == 'mp3':
if style.id3_desc is not None: # also match on 'desc' field
frames = obj.mgfile.tags.getall(style.key)
entry = None
for frame in frames:
if frame.desc.lower() == style.id3_desc.lower():
entry = getattr(frame, style.id3_frame_field)
break
if entry is None: # no desc match
return None
else:
# Get the metadata frame object.
try:
frame = obj.mgfile[style.key]
except KeyError:
return None
entry = getattr(frame, style.id3_frame_field)
else: # Not MP3.
try:
entry = obj.mgfile[style.key]
except KeyError:
return None
# possibly index the list
if style.list_elem:
if entry: # List must have at least one value.
return entry[0]
else:
return None
else:
return entry
def _storedata(self, obj, val, style):
"""Store val for this descriptor's field in the tag dictionary
according to the provided StorageStyle. Store it as a
single-item list if necessary.
"""
# wrap as a list if necessary
if style.list_elem: out = [val]
else: out = val
if obj.type == 'mp3':
# Try to match on "desc" field.
if style.id3_desc is not None:
frames = obj.mgfile.tags.getall(style.key)
# try modifying in place
found = False
for frame in frames:
if frame.desc.lower() == style.id3_desc.lower():
setattr(frame, style.id3_frame_field, out)
found = True
break
# need to make a new frame?
if not found:
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.Frames[style.key](
encoding=3,
desc=style.id3_desc,
**{style.id3_frame_field: val}
)
obj.mgfile.tags.add(frame)
# Try to match on "owner" field.
elif style.key.startswith('UFID:'):
owner = style.key.split(':', 1)[1]
frames = obj.mgfile.tags.getall(style.key)
for frame in frames:
# Replace existing frame data.
if frame.owner == owner:
setattr(frame, style.id3_frame_field, val)
else:
# New frame.
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.UFID(owner=owner,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall('UFID', [frame])
# Just replace based on key.
else:
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.Frames[style.key](encoding = 3,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall(style.key, [frame])
else: # Not MP3.
obj.mgfile[style.key] = out
def _styles(self, obj):
if obj.type in ('mp3', 'mp4'):
styles = self.styles[obj.type]
else:
styles = self.styles['etc'] # sane styles
# Make sure we always return a list of styles, even when given
# a single style for convenience.
if isinstance(styles, StorageStyle):
return [styles]
else:
return styles
def __get__(self, obj, owner):
"""Retrieve the value of this metadata field.
"""
# Fetch the data using the various StorageStyles.
styles = self._styles(obj)
if styles is None:
out = None
else:
for style in styles:
# Use the first style that returns a reasonable value.
out = self._fetchdata(obj, style)
if out:
break
if style.packing:
out = Packed(out, style.packing)[style.pack_pos]
# MPEG-4 freeform frames are (should be?) encoded as UTF-8.
if obj.type == 'mp4' and style.key.startswith('----:') and \
isinstance(out, str):
out = out.decode('utf8')
return _safe_cast(self.out_type, out)
def __set__(self, obj, val):
"""Set the value of this metadata field.
"""
# Store using every StorageStyle available.
styles = self._styles(obj)
if styles is None:
return
for style in styles:
if style.packing:
p = Packed(self._fetchdata(obj, style), style.packing)
p[style.pack_pos] = val
out = p.items
else: # unicode, integer, or boolean scalar
out = val
# deal with Nones according to abstract type if present
if out is None:
if self.out_type == int:
out = 0
elif self.out_type == bool:
out = False
elif self.out_type == unicode:
out = u''
# We trust that packed values are handled above.
# Convert to correct storage type (irrelevant for
# packed values).
if style.as_type == unicode:
if out is None:
out = u''
else:
if self.out_type == bool:
# store bools as 1,0 instead of True,False
out = unicode(int(out))
else:
out = unicode(out)
elif style.as_type == int:
if out is None:
out = 0
else:
out = int(out)
elif style.as_type in (bool, str):
out = style.as_type(out)
# MPEG-4 "freeform" (----) frames must be encoded as UTF-8
# byte strings.
if obj.type == 'mp4' and style.key.startswith('----:') and \
isinstance(out, unicode):
out = out.encode('utf8')
# Store the data.
self._storedata(obj, out, style)
class CompositeDateField(object):
"""A MediaFile field for conveniently accessing the year, month, and
day fields as a datetime.date object. Allows both getting and
setting of the component fields.
"""
def __init__(self, year_field, month_field, day_field):
"""Create a new date field from the indicated MediaFields for
the component values.
"""
self.year_field = year_field
self.month_field = month_field
self.day_field = day_field
def __get__(self, obj, owner):
"""Return a datetime.date object whose components indicating the
smallest valid date whose components are at least as large as
the three component fields (that is, if year == 1999, month == 0,
and day == 0, then date == datetime.date(1999, 1, 1)). If the
components indicate an invalid date (e.g., if month == 47),
datetime.date.min is returned.
"""
try:
return datetime.date(
max(self.year_field.__get__(obj, owner), datetime.MINYEAR),
max(self.month_field.__get__(obj, owner), 1),
max(self.day_field.__get__(obj, owner), 1)
)
except ValueError: # Out of range values.
return datetime.date.min
def __set__(self, obj, val):
"""Set the year, month, and day fields to match the components of
the provided datetime.date object.
"""
self.year_field.__set__(obj, val.year)
self.month_field.__set__(obj, val.month)
self.day_field.__set__(obj, val.day)
class ImageField(object):
"""A descriptor providing access to a file's embedded album art.
Holds a bytestring reflecting the image data. The image should
either be a JPEG or a PNG for cross-format compatibility. It's
probably a bad idea to use anything but these two formats.
"""
@classmethod
def _mime(cls, data):
"""Return the MIME type (either image/png or image/jpeg) of the
image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return 'image/png'
else:
# Currently just fall back to JPEG.
return 'image/jpeg'
@classmethod
def _mp4kind(cls, data):
"""Return the MPEG-4 image type code of the data. If the image
is not a PNG or JPEG, JPEG is assumed.
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return mutagen.mp4.MP4Cover.FORMAT_PNG
else:
return mutagen.mp4.MP4Cover.FORMAT_JPEG
def __get__(self, obj, owner):
if obj.type == 'mp3':
# Look for APIC frames.
for frame in obj.mgfile.tags.values():
if frame.FrameID == 'APIC':
picframe = frame
break
else:
# No APIC frame.
return None
return picframe.data
elif obj.type == 'mp4':
if 'covr' in obj.mgfile:
covers = obj.mgfile['covr']
if covers:
cover = covers[0]
# cover is an MP4Cover, which is a subclass of str.
return cover
# No cover found.
return None
else:
# Here we're assuming everything but MP3 and MPEG-4 uses
# the Xiph/Vorbis Comments standard. This may not be valid.
# http://wiki.xiph.org/VorbisComment#Cover_art
if 'metadata_block_picture' not in obj.mgfile:
# Try legacy COVERART tags.
if 'coverart' in obj.mgfile and obj.mgfile['coverart']:
return base64.b64decode(obj.mgfile['coverart'][0])
return None
for data in obj.mgfile["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
break
except TypeError:
pass
else:
return None
return pic.data
def __set__(self, obj, val):
if val is not None:
if not isinstance(val, str):
raise ValueError('value must be a byte string or None')
if obj.type == 'mp3':
# Clear all APIC frames.
obj.mgfile.tags.delall('APIC')
if val is None:
# If we're clearing the image, we're done.
return
picframe = mutagen.id3.APIC(
encoding = 3,
mime = self._mime(val),
type = 3, # front cover
desc = u'',
data = val,
)
obj.mgfile['APIC'] = picframe
elif obj.type == 'mp4':
if val is None:
if 'covr' in obj.mgfile:
del obj.mgfile['covr']
else:
cover = mutagen.mp4.MP4Cover(val, self._mp4kind(val))
obj.mgfile['covr'] = [cover]
else:
# Again, assuming Vorbis Comments standard.
# Strip all art, including legacy COVERART.
if 'metadata_block_picture' in obj.mgfile:
if 'metadata_block_picture' in obj.mgfile:
del obj.mgfile['metadata_block_picture']
if 'coverart' in obj.mgfile:
del obj.mgfile['coverart']
if 'coverartmime' in obj.mgfile:
del obj.mgfile['coverartmime']
# Add new art if provided.
if val is not None:
pic = mutagen.flac.Picture()
pic.data = val
pic.mime = self._mime(val)
obj.mgfile['metadata_block_picture'] = [
base64.b64encode(pic.write())
]
class FloatValueField(MediaField):
"""A field that stores a floating-point number as a string."""
def __init__(self, places=2, suffix=None, **kwargs):
"""Make a field that stores ``places`` digits after the decimal
point and appends ``suffix`` (if specified) when encoding as a
string.
"""
super(FloatValueField, self).__init__(unicode, **kwargs)
fmt = ['%.', str(places), 'f']
if suffix:
fmt += [' ', suffix]
self.fmt = ''.join(fmt)
def __get__(self, obj, owner):
valstr = super(FloatValueField, self).__get__(obj, owner)
return _safe_cast(float, valstr)
def __set__(self, obj, val):
if not val:
val = 0.0
valstr = self.fmt % val
super(FloatValueField, self).__set__(obj, valstr)
# The file (a collection of fields).
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
def __init__(self, path):
"""Constructs a new MediaFile reflecting the file at path. May
throw UnreadableFileError.
"""
self.path = path
unreadable_exc = (
mutagen.mp3.HeaderNotFoundError,
mutagen.flac.FLACNoHeaderError,
mutagen.monkeysaudio.MonkeysAudioHeaderError,
mutagen.mp4.MP4StreamInfoError,
mutagen.oggvorbis.OggVorbisHeaderError,
)
try:
self.mgfile = mutagen.File(path)
except unreadable_exc:
log.warn('header parsing failed')
raise UnreadableFileError('Mutagen could not read file')
except IOError:
raise UnreadableFileError('could not read file')
except:
# Hide bugs in Mutagen.
log.error('uncaught Mutagen exception:\n' + traceback.format_exc())
raise UnreadableFileError('Mutagen raised an exception')
if self.mgfile is None: # Mutagen couldn't guess the type
raise FileTypeError('file type unsupported by Mutagen')
elif type(self.mgfile).__name__ == 'M4A' or \
type(self.mgfile).__name__ == 'MP4':
self.type = 'mp4'
elif type(self.mgfile).__name__ == 'ID3' or \
type(self.mgfile).__name__ == 'MP3':
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
else:
raise FileTypeError('file type %s unsupported by MediaFile' %
type(self.mgfile).__name__)
# add a set of tags if it's missing
if self.mgfile.tags is None:
self.mgfile.add_tags()
def save(self):
self.mgfile.save()
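    # Hedged usage sketch (illustrative; the path and tag values are
    # assumptions):
    #
    #     >>> f = MediaFile('/music/track.flac')
    #     >>> f.title, f.track        # typed reads via the descriptors below
    #     (u'Some Title', 3)
    #     >>> f.title = u'New Title'  # routed to TIT2 / \xa9nam / 'title'
    #     >>> f.save()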
#### field definitions ####
title = MediaField(
mp3 = StorageStyle('TIT2'),
mp4 = StorageStyle("\xa9nam"),
etc = StorageStyle('title'),
)
artist = MediaField(
mp3 = StorageStyle('TPE1'),
mp4 = StorageStyle("\xa9ART"),
etc = StorageStyle('artist'),
)
album = MediaField(
mp3 = StorageStyle('TALB'),
mp4 = StorageStyle("\xa9alb"),
etc = StorageStyle('album'),
)
genre = MediaField(
mp3 = StorageStyle('TCON'),
mp4 = StorageStyle("\xa9gen"),
etc = StorageStyle('genre'),
)
composer = MediaField(
mp3 = StorageStyle('TCOM'),
mp4 = StorageStyle("\xa9wrt"),
etc = StorageStyle('composer'),
)
grouping = MediaField(
mp3 = StorageStyle('TIT1'),
mp4 = StorageStyle("\xa9grp"),
etc = StorageStyle('grouping'),
)
year = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 0),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 0),
etc = [StorageStyle('date',
packing = packing.DATE,
pack_pos = 0),
StorageStyle('year')]
)
month = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 1),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 1),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 1)
)
day = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 2),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 2),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 2)
)
date = CompositeDateField(year, month, day)
track = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('track'),
StorageStyle('tracknumber')]
)
tracktotal = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('tracktotal'),
StorageStyle('trackc'),
StorageStyle('totaltracks')]
)
disc = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('disc'),
StorageStyle('discnumber')]
)
disctotal = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('disctotal'),
StorageStyle('discc'),
StorageStyle('totaldiscs')]
)
lyrics = MediaField(
mp3 = StorageStyle('USLT',
list_elem = False,
id3_desc = u''),
mp4 = StorageStyle("\xa9lyr"),
etc = StorageStyle('lyrics')
)
comments = MediaField(
mp3 = StorageStyle('COMM', id3_desc = u''),
mp4 = StorageStyle("\xa9cmt"),
etc = [StorageStyle('description'),
StorageStyle('comment')]
)
bpm = MediaField(out_type = int,
mp3 = StorageStyle('TBPM'),
mp4 = StorageStyle('tmpo', as_type = int),
etc = StorageStyle('bpm')
)
comp = MediaField(out_type = bool,
mp3 = StorageStyle('TCMP'),
mp4 = StorageStyle('cpil',
list_elem = False,
as_type = bool),
etc = StorageStyle('compilation')
)
albumartist = MediaField(
mp3 = StorageStyle('TPE2'),
mp4 = StorageStyle('aART'),
etc = [StorageStyle('album artist'),
StorageStyle('albumartist')]
)
albumtype = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Type'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Type'),
etc = StorageStyle('musicbrainz_albumtype')
)
label = MediaField(
mp3 = StorageStyle('TPUB'),
mp4 = [StorageStyle('----:com.apple.iTunes:Label'),
StorageStyle('----:com.apple.iTunes:publisher')],
etc = [StorageStyle('label'),
StorageStyle('publisher')] # Traktor
)
# Album art.
art = ImageField()
# MusicBrainz IDs.
mb_trackid = MediaField(
mp3 = StorageStyle('UFID:http://musicbrainz.org',
list_elem = False,
id3_frame_field = 'data'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Track Id',
as_type=str),
etc = StorageStyle('musicbrainz_trackid')
)
mb_albumid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumid')
)
mb_artistid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_artistid')
)
mb_albumartistid = MediaField(
mp3 = StorageStyle('TXXX',
id3_desc=u'MusicBrainz Album Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumartistid')
)
# ReplayGain fields.
rg_track_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_TRACK_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_GAIN')
)
rg_album_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_ALBUM_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_GAIN')
)
rg_track_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_TRACK_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_PEAK')
)
rg_album_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_ALBUM_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_PEAK')
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if isinstance(self.mgfile.info, mutagen.mp3.MPEGInfo):
return {
mutagen.mp3.STEREO: 2,
mutagen.mp3.JOINTSTEREO: 2,
mutagen.mp3.DUALCHANNEL: 2,
mutagen.mp3.MONO: 1,
}[self.mgfile.info.mode]
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
size = os.path.getsize(self.path)
return int(size * 8 / self.length)
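    # Worked example of the fallback estimate above (illustrative numbers):
    # a 24,000,000-byte lossless file lasting 240.0 seconds gives
    # int(24000000 * 8 / 240.0) == 800000, roughly 800 kbps; this slightly
    # overestimates the audio bitrate because headers and embedded art count
    # toward the on-disk size.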
@property
def format(self):
"""A string describing the file format/codec."""
return TYPES[self.type]
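    # Hedged read-only usage sketch (values illustrative; the format string
    # depends on the module-level TYPES mapping, defined earlier and not
    # shown here):
    #
    #     >>> f = MediaFile('/music/track.flac')
    #     >>> f.length, f.samplerate, f.bitdepth, f.channels
    #     (240.0, 44100, 16, 2)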
| mit |
Ramanujakalyan/Inherit | gis-tools-101/rev_geo.py | 29 | 6294 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "Brian Lehman, Scott Hendrickson"
import sys
import re
import codecs
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
import math
import pprint
import json
import fiona
from shapely.geometry import Point, shape, Polygon, box
from collections import defaultdict
import argparse
import os
import pickle
########################
# functions
def tree(): return defaultdict(tree)
def grid_finder(x,y):
return (int((math.floor(x)-grid_boundaries[0])/delta)
,int((math.floor(y)-grid_boundaries[1])/delta))
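# Worked example (assuming the default bounding box (-185, 15, -65, 70) and
# delta=5): grid_finder(-105.3, 40.1) floors the inputs to (-106.0, 40.0),
# then int((-106.0 - -185) / 5) == 15 and int((40.0 - 15) / 5) == 5, so the
# point lands in grid cell (15, 5).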
def topic_args():
parser = argparse.ArgumentParser(description="Reverse geo coder returns location info given a set of lon,lat")
parser.add_argument("file_name"
, metavar= "file_name"
, nargs="?"
, default=[]
, help="Input file name (optional).")
parser.add_argument("-b"
, "--bounding-box"
, dest="grid_boundaries"
, default="-185,15,-65,70"
, help="Set bounding box for region to include (default: [-185,15,-65,70])")
parser.add_argument("-d"
, "--delta"
, dest="delta"
, default=5
, help="Set the number of degrees between grid coords (default: 5)")
parser.add_argument("-g"
, "--use-saved-grid"
, dest="use_saved_grid"
, default=False
, action="store_true"
, help="Save grid or use previously saved version in data/grid.json")
parser.add_argument("-s"
, "--shape-file-path"
, dest="shape_file_path"
, default="data/tl_2013_us_county.shp"
, help="Set shapefile path (default: data/tl_2013_us_county.shp)")
parser.add_argument("-t"
, "--tweet-input"
, dest="tweet_input"
, default=False
, action="store_true"
, help="Set input as tweet payload instead of coordinates (in progress)")
return parser
def build_grid():
    #grid_boundaries=(-185,15,-65,70) # upper-right edge is plus delta (lower 48 states)
grid={(i,j):{}
for i in range((grid_boundaries[2]-grid_boundaries[0])/delta)
for j in range((grid_boundaries[3]-grid_boundaries[1])/delta) }
with fiona.open(options.shape_file_path) as fc:
print >>sys.stderr, fc.driver,"###",fc.schema,"###", len(fc),"###",fc.crs
print >> sys.stderr,fc.schema
print >>sys.stderr, "Number of records:", len(fc)
print >>sys.stderr, "Bounds of all records:", fc.bounds
print >>sys.stderr, "Bounds applied:",grid_boundaries
print >> sys.stderr,"######## indexing shapes to grid ########"
print >> sys.stderr,"shapes complete:"
c=0
for feature in fc:
c+=1
GEOID=str(feature['properties']['GEOID'])
NAME=feature['properties']['NAME']
INTPTLON=float(feature['properties']['INTPTLON'])
INTPTLAT=float(feature['properties']['INTPTLAT'])
shp=shape(feature['geometry']) # list of coordinates of geometric shape
bb=box(*shp.bounds) #box(minx,miny,maxx,maxy)) creates one boxlike shape to rule them all
for i,j in grid:
grid_box=box(i*delta+grid_boundaries[0]
,j*delta+grid_boundaries[1]
,(i+1)*delta+grid_boundaries[0]
,(j+1)*delta+grid_boundaries[1] )
if grid_box.intersects(bb): #http://toblerity.org/shapely/manual.html#object.intersects
grid[(i,j)][bb]=(shp,GEOID,NAME,INTPTLON,INTPTLAT) # (county shape, countyID)
if c%100==0:
print >> sys.stderr, c
return grid
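# The grid built above is a coarse two-stage spatial index: a point is mapped
# to its cell in O(1) via grid_finder, candidate counties in that cell are
# screened with a cheap bounding-box test, and only the survivors pay for the
# exact point-in-polygon test (see the main loop below).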
if __name__ == '__main__':
options = topic_args().parse_args()
grid_boundaries=[int(item) for item in options.grid_boundaries.split(",")]
delta=int(options.delta)
if not options.use_saved_grid:
grid=build_grid()
else:
if not os.path.isfile("./data/grid.json"):
print >>sys.stderr, "creating ./data/grid.json"
grid=build_grid()
if not os.path.exists("./data"):
os.makedirs("./data")
print >>sys.stderr, "saving file ./data/grid.json"
with open("./data/grid.json","wb") as g:
pickle.dump(grid,g)
else:
print >>sys.stderr, "using ./data/grid.json"
grid=pickle.load(open("./data/grid.json"))
counter=0
in_grid_not_in_county=0
grid_counter=0
print >> sys.stderr,"######## locating geo coords in grid ########"
for line in sys.stdin:
        # NOTE: each input line must contain a "(lng,lat)" pair; any
        # surrounding parentheses/brackets are stripped below.
values=line.replace("(","").replace(")","").replace("[","").replace("]","").strip().split(",")
lng = float(values[0])
lat = float(values[1])
point = Point(float(lng), float(lat))
coords=grid_finder(lng,lat)
found=False
if coords not in grid:
counter+=1
print >> sys.stderr,"not in grid:{},not in county:{},found{}".format(counter,in_grid_not_in_county,grid_counter)
print >> sys.stderr,"{},{}: not in grid".format(lng,lat)
continue
        for bbox in grid[coords]:
            # Cheap test first: the precomputed bounding box. (Renamed from
            # `box` to avoid shadowing shapely's box() imported above.)
            if bbox.contains(point):
                # Precise, and expensive, test: the actual county polygon.
                if grid[coords][bbox][0].contains(point):
                    e = tree()
                    found = True
                    grid_counter += 1
                    e["coords"] = (lng, lat)
                    e["GEOID"] = grid[coords][bbox][1]
                    e["centroid"] = (grid[coords][bbox][3], grid[coords][bbox][4])
                    e["county"] = grid[coords][bbox][2]
                    print json.dumps(e)
                    break  # point found, no need to continue searching
if not found:
in_grid_not_in_county+=1
print >> sys.stderr,"######## DONE ########"
print >> sys.stderr, "{} points outside of grid".format(counter)
print >> sys.stderr, "{} points in grid but not in a county".format(in_grid_not_in_county)
print >> sys.stderr, "{} points in grid and in county".format(grid_counter)
| unlicense |
kennedyshead/home-assistant | tests/components/pushbullet/test_notify.py | 8 | 8722 | """The tests for the pushbullet notification platform."""
import json
from unittest.mock import patch
from pushbullet import PushBullet
import pytest
import homeassistant.components.notify as notify
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, load_fixture
@pytest.fixture
def mock_pushbullet():
"""Mock pushbullet."""
with patch.object(
PushBullet,
"_get_data",
return_value=json.loads(load_fixture("pushbullet_devices.json")),
):
yield
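# The fixture above stubs PushBullet's device listing. Judging from the
# targets used in the tests below ("device/DESKTOP" resolves to identity1,
# "device/My iPhone" to identity2), pushbullet_devices.json presumably
# resembles the following (an assumption; the real fixture may carry more
# fields):
#
#   {"devices": [
#       {"iden": "identity1", "nickname": "DESKTOP", "active": true},
#       {"iden": "identity2", "nickname": "My iPhone", "active": true}
#   ]}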
async def test_pushbullet_config(hass, mock_pushbullet):
"""Test setup."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
async def test_pushbullet_config_bad(hass):
"""Test set up the platform with bad/missing configuration."""
config = {notify.DOMAIN: {"platform": "pushbullet"}}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert not handle_config[notify.DOMAIN]
async def test_pushbullet_push_default(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {"title": "Test Title", "message": "Test Message"}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {"body": "Test Message", "title": "Test Title", "type": "note"}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_device(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_devices(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"device_iden": "identity2",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_email(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
assert len(requests_mock.request_history) == 1
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
async def test_pushbullet_push_mixed(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_no_file(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
"data": {"file": "not_a_file"},
}
assert not await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
| apache-2.0 |