repo_name | path | copies | size | content | license
---|---|---|---|---|---|
paultcochrane/pyvisi | examples/simpleLinePlotMovie.py | 1 | 5232 | # Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Example of plotting a changing function with pyvisi
"""
# what plotting method are we using?
method = 'pyvisi'
# set up some data to plot
from numpy import *
x = arange(10, dtype=float)  # use a concrete dtype; numpy's 'floating' is an abstract scalar type
y = x**2
# plot it using one of the three methods
if method == 'pyvisi':
# example code for how a user would write a script in pyvisi
from pyvisi import * # base level visualisation stuff
#from pyvisi.utils import * # pyvisi specific utils
# import the objects to render the scene using the specific renderer
from pyvisi.renderers.gnuplot import * # gnuplot
#from pyvisi.renderers.vtk import * # vtk
# define the scene object
# a Scene is a container for all of the kinds of things you want to put
# into your plot, for instance images, meshes, arrow/vector/quiver plots,
# contour plots, spheres etc.
scene = Scene()
# create a LinePlot object
plot = LinePlot(scene)
# add some helpful info to the plot
plot.title = 'Example 2D plot'
plot.xlabel = 'x'
plot.ylabel = 'x^2'
plot.linestyle = 'lines'
for i in range(100):
# assign some data to the plot
plot.setData(x, y)
# render the scene to screen
#scene.render(pause=True, interactive=True)
# save the scene out to file
scene.save(fname="simpleLinePlotMovie%03d.png"%i, format=PngImage())
#scene.save(fname="simplePlotMovieExample.ps", format=PsImage())
y = y*0.9
elif method == 'gnuplot':
#### original gnuplot code
import Gnuplot
# set the plot up
_gnuplot = Gnuplot.Gnuplot()
_gnuplot.title('Example 2D plot')
_gnuplot.xlabel('x')
_gnuplot.ylabel('x^2')
for i in range(100):
# set up the data
_data = Gnuplot.Data(x, y, with_='lines')  # 'with' became a reserved word; newer Gnuplot.py spells the option 'with_'
# set up to save to file
_gnuplot('set terminal png')
_gnuplot('set output \"simpleLinePlotMovie%03d.png\"'%i)
# save it
_gnuplot.plot(_data)
y = y*0.9
raw_input('Press enter to continue...\n')
elif method == 'vtk':
#### original vtk code
import vtk
# set up the renderer and the render window
_ren = vtk.vtkRenderer()
_renWin = vtk.vtkRenderWindow()
_renWin.AddRenderer(_ren)
# do a quick check to make sure x and y are same length
if len(x) != len(y):
raise ValueError, "x and y vectors must be same length"
# set up the x and y data arrays to be able to accept the data (code
# here adapted from the C++ of a vtk-users mailing list reply by Sander
# Niemeijer)
_xData = vtk.vtkDataArray.CreateDataArray(vtk.VTK_FLOAT)
_xData.SetNumberOfTuples(len(x))
_yData = vtk.vtkDataArray.CreateDataArray(vtk.VTK_FLOAT)
_yData.SetNumberOfTuples(len(y))
# put the data into the data arrays
for i in range(len(x)):
_xData.SetTuple1(i,x[i])
_yData.SetTuple1(i,y[i])
# create a field data object
# (I think this is as a containter to hold the data arrays)
_fieldData = vtk.vtkFieldData()
_fieldData.AllocateArrays(2)
_fieldData.AddArray(_xData)
_fieldData.AddArray(_yData)
# now put the field data object into a data object so that can add it as
# input to the xyPlotActor
_dataObject = vtk.vtkDataObject()
_dataObject.SetFieldData(_fieldData)
# set up the actor
_plot = vtk.vtkXYPlotActor()
_plot.AddDataObjectInput(_dataObject)
# set the title and stuff
_plot.SetTitle("Example 2D plot")
_plot.SetXTitle("x")
_plot.SetYTitle("x^2")
_plot.SetXValuesToValue()
# set which parts of the data object are to be used for which axis
_plot.SetDataObjectXComponent(0,0)
_plot.SetDataObjectYComponent(0,1)
# add the actor
_ren.AddActor2D(_plot)
# render the scene
_iren = vtk.vtkRenderWindowInteractor()
_iren.SetRenderWindow(_renWin)
_iren.Initialize()
_renWin.Render()
_iren.Start()
# convert the render window to an image
_win2imgFilter = vtk.vtkWindowToImageFilter()
_win2imgFilter.SetInput(_renWin)
# save the image to file
_outWriter = vtk.vtkPNGWriter()
_outWriter.SetInput(_win2imgFilter.GetOutput())
_outWriter.SetFileName("simpleLinePlot.png")
_outWriter.Write()
# pause for input
#raw_input('Press enter to continue...\n')
else:
print "Eeek! What plotting method am I supposed to use???"
# vim: expandtab shiftwidth=4:
| gpl-2.0 |
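A minimal, self-contained sketch of the frame-naming and data-decay pattern shared by all three branches of the example above: each pass writes a zero-padded frame name and damps y by 10% (the scalar starting value below is an assumption; the script itself uses an array).
y = 81.0
for i in range(3):
    fname = "simpleLinePlotMovie%03d.png" % i
    print("%s  y=%s" % (fname, y))  # ...Movie000.png y=81.0, then 72.9, 65.61
    y = y * 0.9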
tuxxy/CloudBot | plugins/wikipedia.py | 22 | 1792 | """Searches wikipedia and returns first sentence of article
Scaevolus 2009"""
import re
import requests
from lxml import etree
from cloudbot import hook
from cloudbot.util import formatting
# security
parser = etree.XMLParser(resolve_entities=False, no_network=True)
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
random_url = api_prefix + "?action=query&format=xml&list=random&rnlimit=1&rnnamespace=0"
paren_re = re.compile(r'\s*\(.*\)$')
@hook.command("wiki", "wikipedia", "w")
def wiki(text):
"""wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""
try:
request = requests.get(search_url, params={'search': text.strip()})
request.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
return "Could not get Wikipedia page: {}".format(e)
x = etree.fromstring(request.text, parser=parser)
ns = '{http://opensearch.org/searchsuggest2}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if not items:
if x.find('error') is not None:
return 'Could not get Wikipedia page: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'No results found.'
def extract(item):
return [item.find(ns + i).text for i in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = ' '.join(desc.split()) # remove excess spaces
desc = formatting.truncate(desc, 200)
return '{} :: {}'.format(desc, requests.utils.quote(url, ':/'))
| gpl-3.0 |
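A short, runnable illustration of the title clean-up in wiki() above: paren_re strips a trailing parenthesised qualifier so the title can be compared against the description (the sample title is an assumption).
import re
paren_re = re.compile(r'\s*\(.*\)$')
print(paren_re.sub('', 'Python (programming language)'))  # -> Python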
lucashmorais/x-Bench | mozmill-env/python/Lib/site-packages/mercurial/httpconnection.py | 91 | 10916 | # httpconnection.py - urllib2 handler for new http support
#
# Copyright 2005, 2006, 2007, 2008 Matt Mackall <[email protected]>
# Copyright 2006, 2007 Alexis S. L. Carvalho <[email protected]>
# Copyright 2006 Vadim Gelfer <[email protected]>
# Copyright 2011 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import logging
import socket
import urllib
import urllib2
import os
from mercurial import httpclient
from mercurial import sslutil
from mercurial import util
from mercurial.i18n import _
# moved here from url.py to avoid a cycle
class httpsendfile(object):
"""This is a wrapper around the objects returned by python's "open".
Its purpose is to send file-like objects via HTTP.
It does, however, not define a __len__ attribute because the length
might be more than Py_ssize_t can handle.
"""
def __init__(self, ui, *args, **kwargs):
# We can't just "self._data = open(*args, **kwargs)" here because there
# is an "open" function defined in this module that shadows the global
# one
self.ui = ui
self._data = open(*args, **kwargs)
self.seek = self._data.seek
self.close = self._data.close
self.write = self._data.write
self.length = os.fstat(self._data.fileno()).st_size
self._pos = 0
self._total = self.length // 1024 * 2
def read(self, *args, **kwargs):
try:
ret = self._data.read(*args, **kwargs)
except EOFError:
self.ui.progress(_('sending'), None)
self._pos += len(ret)
# We pass double the max for total because we currently have
# to send the bundle twice in the case of a server that
# requires authentication. Since we can't know until we try
# once whether authentication will be required, just lie to
# the user and maybe the push succeeds suddenly at 50%.
self.ui.progress(_('sending'), self._pos // 1024,
unit=_('kb'), total=self._total)
return ret
# moved here from url.py to avoid a cycle
def readauthforuri(ui, uri, user):
# Read configuration
config = dict()
for key, val in ui.configitems('auth'):
if '.' not in key:
ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
continue
group, setting = key.rsplit('.', 1)
gdict = config.setdefault(group, dict())
if setting in ('username', 'cert', 'key'):
val = util.expandpath(val)
gdict[setting] = val
# Find the best match
if '://' in uri:
scheme, hostpath = uri.split('://', 1)
else:
# Python 2.4.1 doesn't provide the full URI
scheme, hostpath = 'http', uri
bestuser = None
bestlen = 0
bestauth = None
for group, auth in config.iteritems():
if user and user != auth.get('username', user):
# If a username was set in the URI, the entry username
# must either match it or be unset
continue
prefix = auth.get('prefix')
if not prefix:
continue
p = prefix.split('://', 1)
if len(p) > 1:
schemes, prefix = [p[0]], p[1]
else:
schemes = (auth.get('schemes') or 'https').split()
if (prefix == '*' or hostpath.startswith(prefix)) and \
(len(prefix) > bestlen or (len(prefix) == bestlen and \
not bestuser and 'username' in auth)) \
and scheme in schemes:
bestlen = len(prefix)
bestauth = group, auth
bestuser = auth.get('username')
if user and not bestuser:
auth['username'] = user
return bestauth
# Mercurial (at least until we can remove the old codepath) requires
# that the http response object be sufficiently file-like, so we
# provide a close() method here.
class HTTPResponse(httpclient.HTTPResponse):
def close(self):
pass
class HTTPConnection(httpclient.HTTPConnection):
response_class = HTTPResponse
def request(self, method, uri, body=None, headers={}):
if isinstance(body, httpsendfile):
body.seek(0)
httpclient.HTTPConnection.request(self, method, uri, body=body,
headers=headers)
_configuredlogging = False
LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s'
# Subclass BOTH of these because otherwise urllib2 "helpfully"
# reinserts them since it notices we don't include any subclasses of
# them.
class http2handler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
def __init__(self, ui, pwmgr):
global _configuredlogging
urllib2.AbstractHTTPHandler.__init__(self)
self.ui = ui
self.pwmgr = pwmgr
self._connections = {}
loglevel = ui.config('ui', 'http2debuglevel', default=None)
if loglevel and not _configuredlogging:
_configuredlogging = True
logger = logging.getLogger('mercurial.httpclient')
logger.setLevel(getattr(logging, loglevel.upper()))
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOGFMT))
logger.addHandler(handler)
def close_all(self):
"""Close and remove all connection objects being kept for reuse."""
for openconns in self._connections.values():
for conn in openconns:
conn.close()
self._connections = {}
# shamelessly borrowed from urllib2.AbstractHTTPHandler
def do_open(self, http_class, req, use_ssl):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
# If using a proxy, the host returned by get_host() is
# actually the proxy. On Python 2.6.1, the real destination
# hostname is encoded in the URI in the urllib2 request
# object. On Python 2.6.5, it's stored in the _tunnel_host
# attribute which has no accessor.
tunhost = getattr(req, '_tunnel_host', None)
host = req.get_host()
if tunhost:
proxyhost = host
host = tunhost
elif req.has_proxy():
proxyhost = req.get_host()
host = req.get_selector().split('://', 1)[1].split('/', 1)[0]
else:
proxyhost = None
if proxyhost:
if ':' in proxyhost:
# Note: this means we'll explode if we try and use an
# IPv6 http proxy. This isn't a regression, so we
# won't worry about it for now.
proxyhost, proxyport = proxyhost.rsplit(':', 1)
else:
proxyport = 3128 # squid default
proxy = (proxyhost, proxyport)
else:
proxy = None
if not host:
raise urllib2.URLError('no host given')
connkey = use_ssl, host, proxy
allconns = self._connections.get(connkey, [])
conns = [c for c in allconns if not c.busy()]
if conns:
h = conns[0]
else:
if allconns:
self.ui.debug('all connections for %s busy, making a new '
'one\n' % host)
timeout = None
if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
timeout = req.timeout
h = http_class(host, timeout=timeout, proxy_hostport=proxy)
self._connections.setdefault(connkey, []).append(h)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
headers = dict(
(name.title(), val) for name, val in headers.items())
try:
path = req.get_selector()
if '://' in path:
path = path.split('://', 1)[1].split('/', 1)[1]
if path[0] != '/':
path = '/' + path
h.request(req.get_method(), path, req.data, headers)
r = h.getresponse()
except socket.error, err: # XXX what error?
raise urllib2.URLError(err)
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
r.recv = r.read
resp = urllib.addinfourl(r, r.headers, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
# httplib always uses the given host/port as the socket connect
# target, and then allows full URIs in the request path, which it
# then observes and treats as a signal to do proxying instead.
def http_open(self, req):
if req.get_full_url().startswith('https'):
return self.https_open(req)
def makehttpcon(*args, **kwargs):
k2 = dict(kwargs)
k2['use_ssl'] = False
return HTTPConnection(*args, **k2)
return self.do_open(makehttpcon, req, False)
def https_open(self, req):
# req.get_full_url() does not contain credentials and we may
# need them to match the certificates.
url = req.get_full_url()
user, password = self.pwmgr.find_stored_password(url)
res = readauthforuri(self.ui, url, user)
if res:
group, auth = res
self.auth = auth
self.ui.debug("using auth.%s.* for authentication\n" % group)
else:
self.auth = None
return self.do_open(self._makesslconnection, req, True)
def _makesslconnection(self, host, port=443, *args, **kwargs):
keyfile = None
certfile = None
if args: # key_file
keyfile = args.pop(0)
if args: # cert_file
certfile = args.pop(0)
# if the user has specified different key/cert files in
# hgrc, we prefer these
if self.auth and 'key' in self.auth and 'cert' in self.auth:
keyfile = self.auth['key']
certfile = self.auth['cert']
# let host port take precedence
if ':' in host and '[' not in host or ']:' in host:
host, port = host.rsplit(':', 1)
port = int(port)
if '[' in host:
host = host[1:-1]
if keyfile:
kwargs['keyfile'] = keyfile
if certfile:
kwargs['certfile'] = certfile
kwargs.update(sslutil.sslkwargs(self.ui, host))
con = HTTPConnection(host, port, use_ssl=True,
ssl_validator=sslutil.validator(self.ui, host),
**kwargs)
return con
| mit |
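A simplified, self-contained sketch of the longest-prefix selection that readauthforuri() performs over [auth] groups; the group names, host and usernames below are assumptions, not values taken from the module above.
config = {
    'corp': {'prefix': 'hg.example.com', 'username': 'alice'},
    'any': {'prefix': '*', 'username': 'bob'},
}
hostpath = 'hg.example.com/repo'
candidates = [auth for auth in config.values()
              if auth['prefix'] == '*' or hostpath.startswith(auth['prefix'])]
best = max(candidates,
           key=lambda auth: 0 if auth['prefix'] == '*' else len(auth['prefix']))
print(best['username'])  # alice -- the more specific prefix wins, mirroring bestlen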
mrshelly/openerp71313 | openerp/addons/mail/wizard/__init__.py | 438 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import invite
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Kryz/sentry | src/sentry/templatetags/sentry_react.py | 8 | 1878 | from __future__ import absolute_import
import sentry
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.html import mark_safe
from pkg_resources import parse_version
from sentry import features, options
from sentry.api.serializers.base import serialize
from sentry.utils import json
from sentry.utils.functional import extract_lazy_object
register = template.Library()
def _get_version_info():
current = sentry.VERSION
latest = options.get('sentry:latest_version') or current
upgrade_available = parse_version(latest) > parse_version(current)
build = sentry.__build__ or current
return {
'current': current,
'latest': latest,
'build': build,
'upgradeAvailable': upgrade_available,
}
@register.simple_tag(takes_context=True)
def get_react_config(context):
if 'request' in context:
user = context['request'].user
else:
user = None
if user:
user = extract_lazy_object(user)
enabled_features = []
if features.has('organizations:create', actor=user):
enabled_features.append('organizations:create')
if features.has('auth:register', actor=user):
enabled_features.append('auth:register')
context = {
'singleOrganization': settings.SENTRY_SINGLE_ORGANIZATION,
'urlPrefix': settings.SENTRY_URL_PREFIX,
'version': _get_version_info(),
'features': enabled_features,
'mediaUrl': reverse('sentry-media', args=['sentry', '']),
}
if user and user.is_authenticated():
context.update({
'isAuthenticated': True,
'user': serialize(user, user),
})
else:
context.update({
'isAuthenticated': False,
'user': None,
})
return mark_safe(json.dumps(context))
| bsd-3-clause |
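A brief illustration of the upgrade check inside _get_version_info() above: pkg_resources.parse_version gives version-aware ordering, so a newer "latest" compares greater and pre-releases sort before final releases (the version strings are assumptions).
from pkg_resources import parse_version
print(parse_version('8.1.0') > parse_version('8.0.0'))     # True
print(parse_version('8.0.0') > parse_version('8.0.0rc1'))  # True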
dufferzafar/picard-plugins-testing | plugins/viewvariables/__init__.py | 5 | 4148 | # -*- coding: utf-8 -*-
PLUGIN_NAME = u'View script variables'
PLUGIN_AUTHOR = u'Sophist'
PLUGIN_DESCRIPTION = u'''Display a dialog box listing the metadata variables for the track / file.'''
PLUGIN_VERSION = '0.5'
PLUGIN_API_VERSIONS = ['1.0']
from PyQt4 import QtGui, QtCore
try:
from picard.util.tags import PRESERVED_TAGS
except ImportError:
from picard.file import File
PRESERVED_TAGS = File._default_preserved_tags
from picard.file import File
from picard.track import Track
from picard.ui.itemviews import BaseAction, register_file_action, register_track_action
from picard.plugins.viewvariables.ui_variables_dialog import Ui_VariablesDialog
class ViewVariables(BaseAction):
NAME = 'View script variables'
def callback(self, objs):
obj = objs[0]
files = self.tagger.get_files_from_objects(objs)
if files:
obj = files[0]
dialog = ViewVariablesDialog(obj)
dialog.exec_()
class ViewVariablesDialog(QtGui.QDialog):
def __init__(self, obj, parent=None):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_VariablesDialog()
self.ui.setupUi(self)
self.ui.buttonBox.accepted.connect(self.accept)
self.ui.buttonBox.rejected.connect(self.reject)
metadata = obj.metadata
if isinstance(obj,File):
self.setWindowTitle(_("File: %s") % obj.base_filename)
elif isinstance(obj,Track):
tn = metadata['tracknumber']
if len(tn) == 1:
tn = u"0" + tn
self.setWindowTitle(_("Track: %s %s ") % (tn, metadata['title']))
else:
self.setWindowTitle(_("Variables"))
self._display_metadata(metadata)
def _display_metadata(self, metadata):
keys = metadata.keys()
keys.sort(key=lambda key:
'0' + key if key in PRESERVED_TAGS and key.startswith('~') else
'1' + key if key.startswith('~') else
'2' + key
)
media = hidden = album = False
table = self.ui.metadata_table
key_example, value_example = self.get_table_items(table, 0)
self.key_flags = key_example.flags()
self.value_flags = value_example.flags()
table.setRowCount(len(keys)+3)
i = 0
for key in keys:
if key in PRESERVED_TAGS and key.startswith('~') :
if not media:
self.add_separator_row(table, i, _("File variables"))
i += 1
media = True
elif key.startswith('~'):
if not hidden:
self.add_separator_row(table, i, _("Hidden variables"))
i += 1
hidden = True
else:
if not album:
self.add_separator_row(table, i, _("Tag variables"))
i += 1
album = True
key_item, value_item = self.get_table_items(table, i)
i += 1
key_item.setText(u"_" + key[1:] if key.startswith('~') else key)
if key in metadata:
value = dict.get(metadata, key)
if len(value) == 1 and value[0] != '':
value = value[0]
else:
value = repr(value)
value_item.setText(value)
def add_separator_row(self, table, i, title):
key_item, value_item = self.get_table_items(table, i)
font = key_item.font()
font.setBold(True)
key_item.setFont(font)
key_item.setText(title)
def get_table_items(self, table, i):
key_item = table.item(i, 0)
value_item = table.item(i, 1)
if not key_item:
key_item = QtGui.QTableWidgetItem()
key_item.setFlags(self.key_flags)
table.setItem(i, 0, key_item)
if not value_item:
value_item = QtGui.QTableWidgetItem()
value_item.setFlags(self.value_flags)
table.setItem(i, 1, value_item)
return key_item, value_item
vv = ViewVariables()
register_file_action(vv)
register_track_action(vv)
| gpl-3.0 |
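A minimal sketch of the sort-key trick in _display_metadata() above: prefixing '0'/'1'/'2' groups preserved file variables, other hidden (~) variables and ordinary tags while keeping each group alphabetical. The tag names and the stand-in PRESERVED_TAGS list are assumptions.
PRESERVED_TAGS = ['~bitrate']
keys = ['title', '~bitrate', '~length', 'album']
keys.sort(key=lambda key:
          '0' + key if key in PRESERVED_TAGS and key.startswith('~') else
          '1' + key if key.startswith('~') else
          '2' + key)
print(keys)  # ['~bitrate', '~length', 'album', 'title']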
tejasnikumbh/ThesisCode | lib/python2.7/site-packages/numpy/distutils/unixccompiler.py | 55 | 4106 | """
unixccompiler - can handle very long argument lists for ar.
"""
from __future__ import division, absolute_import, print_function
import os
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import *
from numpy.distutils.ccompiler import replace_method
from numpy.distutils.compat import get_exception
if sys.version_info[0] < 3:
from . import log
else:
from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile a single source files with a Unix-style compiler."""
# HP ad-hoc fix, see ticket 1383
ccomp = self.compiler_so
if ccomp[0] == 'aCC':
# remove flags that will trigger ANSI-C mode for aCC
if '-Ae' in ccomp:
ccomp.remove('-Ae')
if '-Aa' in ccomp:
ccomp.remove('-Aa')
# add flags for (almost) sane C++ handling
ccomp += ['-AA']
self.compiler_so = ccomp
display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs, display = display)
except DistutilsExecError:
msg = str(get_exception())
raise CompileError(msg)
replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
def UnixCCompiler_create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
"""
Build a static library in a separate sub-process.
Parameters
----------
objects : list or tuple of str
List of paths to object files used to build the static library.
output_libname : str
The library name as an absolute or relative (if `output_dir` is used)
path.
output_dir : str, optional
The path to the output directory. Default is None, in which case
the ``output_dir`` attribute of the UnixCCompiler instance.
debug : bool, optional
This parameter is not used.
target_lang : str, optional
This parameter is not used.
Returns
-------
None
"""
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
try:
# previous .a may be screwed up; best to remove it first
# and recreate.
# Also, ar on OS X doesn't handle updating universal archives
os.unlink(output_filename)
except (IOError, OSError):
pass
self.mkpath(os.path.dirname(output_filename))
tmp_objects = objects + self.objects
while tmp_objects:
objects = tmp_objects[:50]
tmp_objects = tmp_objects[50:]
display = '%s: adding %d object files to %s' % (
os.path.basename(self.archiver[0]),
len(objects), output_filename)
self.spawn(self.archiver + [output_filename] + objects,
display = display)
# Not many Unices require ranlib anymore -- SunOS 4.x is, I
# think, the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
output_filename)
try:
self.spawn(self.ranlib + [output_filename],
display = display)
except DistutilsExecError:
msg = str(get_exception())
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
return
replace_method(UnixCCompiler, 'create_static_lib',
UnixCCompiler_create_static_lib)
| mit |
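A standalone sketch of the 50-object batching that UnixCCompiler_create_static_lib above uses to keep the 'ar' command line short; the object names are made up.
tmp_objects = ['obj%d.o' % i for i in range(120)]
batches = []
while tmp_objects:
    batches.append(tmp_objects[:50])
    tmp_objects = tmp_objects[50:]
print([len(b) for b in batches])  # [50, 50, 20]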
hmgaudecker/econ-project-templates | {{cookiecutter.project_slug}}/.mywaflib/waflib/extras/scala.py | 55 | 3327 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
Scala support
scalac outputs files a bit where it wants to
"""
import os
from waflib import Task, Utils, Node
from waflib.TaskGen import feature, before_method, after_method
from waflib.Tools import ccroot
ccroot.USELIB_VARS['scalac'] = set(['CLASSPATH', 'SCALACFLAGS'])
from waflib.Tools import javaw
@feature('scalac')
@before_method('process_source')
def apply_scalac(self):
Utils.def_attrs(self, jarname='', classpath='',
sourcepath='.', srcdir='.',
jar_mf_attributes={}, jar_mf_classpath=[])
outdir = getattr(self, 'outdir', None)
if outdir:
if not isinstance(outdir, Node.Node):
outdir = self.path.get_bld().make_node(self.outdir)
else:
outdir = self.path.get_bld()
outdir.mkdir()
self.env['OUTDIR'] = outdir.abspath()
self.scalac_task = tsk = self.create_task('scalac')
tmp = []
srcdir = getattr(self, 'srcdir', '')
if isinstance(srcdir, Node.Node):
srcdir = [srcdir]
for x in Utils.to_list(srcdir):
if isinstance(x, Node.Node):
y = x
else:
y = self.path.find_dir(x)
if not y:
self.bld.fatal('Could not find the folder %s from %s' % (x, self.path))
tmp.append(y)
tsk.srcdir = tmp
# reuse some code
feature('scalac')(javaw.use_javac_files)
after_method('apply_scalac')(javaw.use_javac_files)
feature('scalac')(javaw.set_classpath)
after_method('apply_scalac', 'use_scalac_files')(javaw.set_classpath)
SOURCE_RE = '**/*.scala'
class scalac(javaw.javac):
color = 'GREEN'
vars = ['CLASSPATH', 'SCALACFLAGS', 'SCALAC', 'OUTDIR']
def runnable_status(self):
"""
Wait for dependent tasks to be complete, then read the file system to find the input nodes.
"""
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
if not self.inputs:
global SOURCE_RE
self.inputs = []
for x in self.srcdir:
self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False))
return super(javaw.javac, self).runnable_status()
def run(self):
"""
Execute the scalac compiler
"""
env = self.env
gen = self.generator
bld = gen.bld
wd = bld.bldnode.abspath()
def to_list(xx):
if isinstance(xx, str):
return [xx]
return xx
self.last_cmd = lst = []
lst.extend(to_list(env['SCALAC']))
lst.extend(['-classpath'])
lst.extend(to_list(env['CLASSPATH']))
lst.extend(['-d'])
lst.extend(to_list(env['OUTDIR']))
lst.extend(to_list(env['SCALACFLAGS']))
lst.extend([a.abspath() for a in self.inputs])
lst = [x for x in lst if x]
try:
self.out = self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, output=0, quiet=0)[1]
except:
self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None)
def configure(self):
"""
Detect the scalac program
"""
# If SCALA_HOME is set, we prepend it to the path list
java_path = self.environ['PATH'].split(os.pathsep)
v = self.env
if 'SCALA_HOME' in self.environ:
java_path = [os.path.join(self.environ['SCALA_HOME'], 'bin')] + java_path
self.env['SCALA_HOME'] = [self.environ['SCALA_HOME']]
for x in 'scalac scala'.split():
self.find_program(x, var=x.upper(), path_list=java_path)
if 'CLASSPATH' in self.environ:
v['CLASSPATH'] = self.environ['CLASSPATH']
v.SCALACFLAGS = ['-verbose']
if not v['SCALAC']:
self.fatal('scalac is required for compiling scala classes')
| bsd-3-clause |
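A hedged wscript sketch of how the scalac feature above is typically driven; the feature and attribute names come from the module, but the project layout and the conf.load call are illustrative assumptions, not taken from this repository.
def configure(conf):
    conf.load('scala')  # assumed waf 1.6-style loading of the tool above
def build(bld):
    bld(features='scalac', srcdir='src', outdir='classes')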
krivenko/pomerol2triqs | test/python/slater_gf.py | 1 | 2761 | from h5 import HDFArchive
from triqs.gf import *
from triqs.operators import *
from triqs.operators.util.op_struct import set_operator_structure, get_mkind
from triqs.operators.util.U_matrix import U_matrix
from triqs.operators.util.hamiltonians import h_int_slater
from triqs.operators.util.observables import N_op, S_op, L_op
from triqs.utility import mpi
from triqs.utility.comparison_tests import *
from pomerol2triqs import PomerolED
from itertools import product
# 5-orbital atom with Slater interaction term
####################
# Input parameters #
####################
L = 2 # Angular momentum
beta = 10.0 # Inverse temperature
mu = 32.5 # Chemical potential (3 electrons in 5 bands)
# Slater parameters
U = 5.0
J = 0.1
F0 = U
F2 = J*(14.0/(1.0 + 0.625))
F4 = F2*0.625
spin_names = ("up", "dn")
orb_names = list(range(-L, L+1))
U_mat = U_matrix(L, radial_integrals = [F0,F2,F4], basis='spherical')
# Number of Matsubara frequencies for GF calculation
n_iw = 200
# Number of imaginary time slices for GF calculation
n_tau = 1001
# Energy window for real frequency GF calculation
energy_window = (-5, 5)
# Number of frequency points for real frequency GF calculation
n_w = 1000
# GF structure
gf_struct = set_operator_structure(spin_names, orb_names, False)
mkind = get_mkind(False, None)
# Conversion from TRIQS to Pomerol notation for operator indices
index_converter = {mkind(sn, bn) : ("atom", bi, "down" if sn == "dn" else "up")
for sn, (bi, bn) in product(spin_names, enumerate(orb_names))}
# Make PomerolED solver object
ed = PomerolED(index_converter, verbose = True)
# Hamiltonian
H = h_int_slater(spin_names, orb_names, U_mat, False)
# Number of particles
N = N_op(spin_names, orb_names, False)
# z-component of spin
Sz = S_op('z', spin_names, orb_names, False)
# z-component of angular momentum
Lz = L_op('z', spin_names, orb_names, off_diag = False, basis = 'spherical')
# Double check that we are actually using integrals of motion
h_comm = lambda op: H*op - op*H
assert h_comm(N).is_zero()
assert h_comm(Sz).is_zero()
assert h_comm(Lz).is_zero()
# Diagonalize H
# Do not split H into blocks (uncomment to generate reference data)
#ed.diagonalize(H, True)
ed.diagonalize(H, [N, Sz, Lz])
# Compute G(i\omega)
G_iw = ed.G_iw(gf_struct, beta, n_iw)
# Compute G(\tau)
G_tau = ed.G_tau(gf_struct, beta, n_tau)
if mpi.is_master_node():
with HDFArchive('slater_gf.out.h5', 'w') as ar:
ar['H'] = H
ar['G_iw'] = G_iw
ar['G_tau'] = G_tau
with HDFArchive("slater_gf.ref.h5", 'r') as ar:
assert (ar['H'] - H).is_zero()
assert_block_gfs_are_close(ar['G_iw'], G_iw)
assert_block_gfs_are_close(ar['G_tau'], G_tau)
| gpl-3.0 |
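A quick arithmetic check of the Slater-integral relations used above: with F0 = U, F4/F2 = 0.625 and the usual d-shell convention J = (F2 + F4)/14, the parameters reproduce J = 0.1.
J = 0.1
F2 = J * (14.0 / (1.0 + 0.625))
F4 = F2 * 0.625
print(abs((F2 + F4) / 14.0 - J) < 1e-12)  # True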
suninsky/ReceiptOCR | Python/server/bin/pilprint.py | 1 | 2635 | #!/home/haop/code/ReceiptOCR/Python/server/bin/python2.7
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| mit |
shenqicang/openmc | tests/test_statepoint_interval/test_statepoint_interval.py | 4 | 3054 | #!/usr/bin/env python
import os
import glob
from subprocess import Popen, STDOUT, PIPE, call
import filecmp
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--mpi_exec', dest='mpi_exec', default='')
parser.add_option('--mpi_np', dest='mpi_np', default='3')
parser.add_option('--exe', dest='exe')
(opts, args) = parser.parse_args()
cwd = os.getcwd()
def test_run():
if opts.mpi_exec != '':
proc = Popen([opts.mpi_exec, '-np', opts.mpi_np, opts.exe, cwd],
stderr=STDOUT, stdout=PIPE)
else:
proc = Popen([opts.exe, cwd], stderr=STDOUT, stdout=PIPE)
print(proc.communicate()[0])
returncode = proc.returncode
assert returncode == 0, 'OpenMC did not exit successfully.'
def test_statepoints_exist():
statepoint = glob.glob(os.path.join(cwd, 'statepoint.02.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint.02 files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint.2 file is not a binary or hdf5 file.'
statepoint = glob.glob(os.path.join(cwd, 'statepoint.04.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint.04 files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint.4 file is not a binary or hdf5 file.'
statepoint = glob.glob(os.path.join(cwd, 'statepoint.06.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint.06 files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint.6 file is not a binary or hdf5 file.'
statepoint = glob.glob(os.path.join(cwd, 'statepoint.08.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint.08 files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint.8 file is not a binary or hdf5 file.'
statepoint = glob.glob(os.path.join(cwd, 'statepoint.10.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint.10 files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint.10 file is not a binary or hdf5 file.'
def test_results():
statepoint = glob.glob(os.path.join(cwd, 'statepoint.10.*'))
call(['python', 'results.py', statepoint[0]])
compare = filecmp.cmp('results_test.dat', 'results_true.dat')
if not compare:
os.rename('results_test.dat', 'results_error.dat')
assert compare, 'Results do not agree.'
def teardown():
output = glob.glob(os.path.join(cwd, 'statepoint.*'))
output.append(os.path.join(cwd, 'results_test.dat'))
for f in output:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
# test for openmc executable
if opts.exe is None:
raise Exception('Must specify OpenMC executable from command line with --exe.')
# run tests
try:
test_run()
test_statepoints_exist()
test_results()
finally:
teardown()
| mit |
tudorvio/tempest | tempest/services/network/resources.py | 82 | 5543 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
return super(AttributeDict, self).__getattribute__(name)
@six.add_metaclass(abc.ABCMeta)
class DeletableResource(AttributeDict):
"""
Support deletion of neutron resources (networks, subnets) via a
delete() method, as is supported by keystone and nova resources.
"""
def __init__(self, *args, **kwargs):
self.client = kwargs.pop('client', None)
super(DeletableResource, self).__init__(*args, **kwargs)
def __str__(self):
return '<%s id="%s" name="%s">' % (self.__class__.__name__,
self.id, self.name)
@abc.abstractmethod
def delete(self):
return
@abc.abstractmethod
def refresh(self):
return
def __hash__(self):
return hash(self.id)
def wait_for_status(self, status):
if not hasattr(self, 'status'):
return
def helper_get():
self.refresh()
return self
return self.client.wait_for_resource_status(helper_get, status)
class DeletableNetwork(DeletableResource):
def delete(self):
self.client.delete_network(self.id)
class DeletableSubnet(DeletableResource):
def __init__(self, *args, **kwargs):
super(DeletableSubnet, self).__init__(*args, **kwargs)
self._router_ids = set()
def update(self, *args, **kwargs):
result = self.client.update_subnet(self.id,
*args,
**kwargs)
return super(DeletableSubnet, self).update(**result['subnet'])
def add_to_router(self, router_id):
self._router_ids.add(router_id)
self.client.add_router_interface_with_subnet_id(router_id,
subnet_id=self.id)
def delete(self):
for router_id in self._router_ids.copy():
self.client.remove_router_interface_with_subnet_id(
router_id,
subnet_id=self.id)
self._router_ids.remove(router_id)
self.client.delete_subnet(self.id)
class DeletableRouter(DeletableResource):
def set_gateway(self, network_id):
return self.update(external_gateway_info=dict(network_id=network_id))
def unset_gateway(self):
return self.update(external_gateway_info=dict())
def update(self, *args, **kwargs):
result = self.client.update_router(self.id,
*args,
**kwargs)
return super(DeletableRouter, self).update(**result['router'])
def delete(self):
self.unset_gateway()
self.client.delete_router(self.id)
class DeletableFloatingIp(DeletableResource):
def refresh(self, *args, **kwargs):
result = self.client.show_floatingip(self.id,
*args,
**kwargs)
super(DeletableFloatingIp, self).update(**result['floatingip'])
def update(self, *args, **kwargs):
result = self.client.update_floatingip(self.id,
*args,
**kwargs)
super(DeletableFloatingIp, self).update(**result['floatingip'])
def __repr__(self):
return '<%s addr="%s">' % (self.__class__.__name__,
self.floating_ip_address)
def __str__(self):
return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
self.id)
def delete(self):
self.client.delete_floatingip(self.id)
class DeletablePort(DeletableResource):
def delete(self):
self.client.delete_port(self.id)
class DeletableSecurityGroup(DeletableResource):
def delete(self):
self.client.delete_security_group(self.id)
class DeletableSecurityGroupRule(DeletableResource):
def __repr__(self):
return '<%s id="%s">' % (self.__class__.__name__, self.id)
def delete(self):
self.client.delete_security_group_rule(self.id)
class DeletablePool(DeletableResource):
def delete(self):
self.client.delete_pool(self.id)
class DeletableMember(DeletableResource):
def delete(self):
self.client.delete_member(self.id)
class DeletableVip(DeletableResource):
def delete(self):
self.client.delete_vip(self.id)
def refresh(self):
result = self.client.show_vip(self.id)
super(DeletableVip, self).update(**result['vip'])
| apache-2.0 |
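A minimal, self-contained sketch of the attribute access AttributeDict provides; the class body is repeated so the snippet runs on its own, and the field values are assumptions.
class AttributeDict(dict):
    def __getattr__(self, name):
        if name in self:
            return self[name]
        return super(AttributeDict, self).__getattribute__(name)
net = AttributeDict(id='abc123', name='private')
print("%s %s" % (net.name, net['name']))  # private private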
shinglyu/ns3-h264-svc | src/applications/.waf-1.5.16-e6d03192b5ddfa5ef2c8d65308e48e42/wafadmin/Tools/msvc.py | 6 | 20977 | #! /usr/bin/env python
# encoding: utf-8
import os,sys,re,string,optparse
import Utils,TaskGen,Runner,Configure,Task,Options
from Logs import debug,info,warn,error
from TaskGen import after,before,feature
from Configure import conftest,conf
import ccroot,cc,cxx,ar,winres
from libtool import read_la_file
import _winreg
pproc=Utils.pproc
g_msvc_systemlibs="""
aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid
gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32
osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
""".split()
all_msvc_platforms=[('x64','amd64'),('x86','x86'),('ia64','ia64'),('x86_amd64','amd64'),('x86_ia64','ia64')]
all_wince_platforms=[('armv4','arm'),('armv4i','arm'),('mipsii','mips'),('mipsii_fp','mips'),('mipsiv','mips'),('mipsiv_fp','mips'),('sh4','sh'),('x86','cex86')]
all_icl_platforms=[('intel64','amd64'),('em64t','amd64'),('ia32','x86'),('Itanium','ia64')]
def setup_msvc(conf,versions):
platforms=Utils.to_list(conf.env['MSVC_TARGETS'])or[i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
desired_versions=conf.env['MSVC_VERSIONS']or[v for v,_ in versions][::-1]
versiondict=dict(versions)
for version in desired_versions:
try:
targets=dict(versiondict[version])
for target in platforms:
try:
arch,(p1,p2,p3)=targets[target]
compiler,revision=version.split()
return compiler,revision,p1,p2,p3
except KeyError:continue
except KeyError:continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
def get_msvc_version(conf,compiler,version,target,vcvars):
debug('msvc: get_msvc_version: %r %r %r',compiler,version,target)
batfile=os.path.join(conf.blddir,'waf-print-msvc.bat')
f=open(batfile,'w')
f.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%
"""%(vcvars,target))
f.close()
sout=Utils.cmd_output(['cmd','/E:on','/V:on','/C',batfile])
lines=sout.splitlines()
for x in('Setting environment','Setting SDK environment','Intel(R) C++ Compiler'):
if lines[0].find(x)!=-1:
break
else:
debug('msvc: get_msvc_version: %r %r %r -> not found',compiler,version,target)
conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)')
for line in lines[1:]:
if line.startswith('PATH='):
path=line[5:]
MSVC_PATH=path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR=[i for i in line[8:].split(';')if i]
elif line.startswith('LIB='):
MSVC_LIBDIR=[i for i in line[4:].split(';')if i]
env={}
env.update(os.environ)
env.update(PATH=path)
compiler_name,linker_name,lib_name=_get_prog_names(conf,compiler)
cxx=conf.find_program(compiler_name,path_list=MSVC_PATH)
if env.has_key('CL'):
del(env['CL'])
try:
p=pproc.Popen([cxx,'/help'],env=env,stdout=pproc.PIPE,stderr=pproc.PIPE)
out,err=p.communicate()
if p.returncode!=0:
raise Exception('return code: %r: %r'%(p.returncode,err))
except Exception,e:
debug('msvc: get_msvc_version: %r %r %r -> failure',compiler,version,target)
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r -> OK',compiler,version,target)
return(MSVC_PATH,MSVC_INCDIR,MSVC_LIBDIR)
def gather_wsdk_versions(conf,versions):
version_pattern=re.compile('^v..?.?\...?.?')
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index=0
while 1:
try:
version=_winreg.EnumKey(all_versions,index)
except WindowsError:
break
index=index+1
if not version_pattern.match(version):
continue
try:
msvc_version=_winreg.OpenKey(all_versions,version)
path,type=_winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path,'bin','SetEnv.cmd')):
targets=[]
for target,arch in all_msvc_platforms:
try:
targets.append((target,(arch,conf.get_msvc_version('wsdk',version,'/'+target,os.path.join(path,'bin','SetEnv.cmd')))))
except Configure.ConfigurationError:
pass
versions.append(('wsdk '+version[1:],targets))
def gather_msvc_versions(conf,versions):
try:
ce_sdk=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
try:
ce_sdk=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
ce_sdk=''
if ce_sdk:
supported_wince_platforms=[]
ce_index=0
while 1:
try:
sdk_device=_winreg.EnumKey(ce_sdk,ce_index)
except WindowsError:
break
ce_index=ce_index+1
sdk=_winreg.OpenKey(ce_sdk,sdk_device)
path,type=_winreg.QueryValueEx(sdk,'SDKRootDir')
path=str(path)
path,device=os.path.split(path)
if not device:
path,device=os.path.split(path)
for arch,compiler in all_wince_platforms:
platforms=[]
if os.path.isdir(os.path.join(path,device,'Lib',arch)):
platforms.append((arch,compiler,os.path.join(path,device,'Include',arch),os.path.join(path,device,'Lib',arch)))
if platforms:
supported_wince_platforms.append((device,platforms))
version_pattern=re.compile('^..?\...?')
for vcver,vcvar in[('VCExpress','exp'),('VisualStudio','')]:
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver)
except WindowsError:
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Microsoft\\'+vcver)
except WindowsError:
continue
index=0
while 1:
try:
version=_winreg.EnumKey(all_versions,index)
except WindowsError:
break
index=index+1
if not version_pattern.match(version):
continue
try:
msvc_version=_winreg.OpenKey(all_versions,version+"\\Setup\\VS")
path,type=_winreg.QueryValueEx(msvc_version,'ProductDir')
path=str(path)
targets=[]
if ce_sdk:
for device,platforms in supported_wince_platforms:
cetargets=[]
for platform,compiler,include,lib in platforms:
winCEpath=os.path.join(path,'VC','ce')
if os.path.isdir(winCEpath):
common_bindirs,_1,_2=conf.get_msvc_version('msvc',version,'x86',os.path.join(path,'Common7','Tools','vsvars32.bat'))
if os.path.isdir(os.path.join(winCEpath,'lib',platform)):
bindirs=[os.path.join(winCEpath,'bin',compiler),os.path.join(winCEpath,'bin','x86_'+compiler)]+common_bindirs
incdirs=[include,os.path.join(winCEpath,'include'),os.path.join(winCEpath,'atlmfc','include')]
libdirs=[lib,os.path.join(winCEpath,'lib',platform),os.path.join(winCEpath,'atlmfc','lib',platform)]
cetargets.append((platform,(platform,(bindirs,incdirs,libdirs))))
versions.append((device+' '+version,cetargets))
if os.path.isfile(os.path.join(path,'VC','vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target,(realtarget,conf.get_msvc_version('msvc',version,target,os.path.join(path,'VC','vcvarsall.bat')))))
except:
pass
elif os.path.isfile(os.path.join(path,'Common7','Tools','vsvars32.bat')):
try:
targets.append(('x86',('x86',conf.get_msvc_version('msvc',version,'x86',os.path.join(path,'Common7','Tools','vsvars32.bat')))))
except Configure.ConfigurationError:
pass
versions.append(('msvc '+version,targets))
except WindowsError:
continue
def gather_icl_versions(conf,versions):
version_pattern=re.compile('^...?.?\....?.?')
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
except WindowsError:
try:
all_versions=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\Intel\\Compilers\\C++')
except WindowsError:
return
index=0
while 1:
try:
version=_winreg.EnumKey(all_versions,index)
except WindowsError:
break
index=index+1
if not version_pattern.match(version):
continue
targets=[]
for target,arch in all_icl_platforms:
try:
icl_version=_winreg.OpenKey(all_versions,version+'\\'+target)
path,type=_winreg.QueryValueEx(icl_version,'ProductDir')
if os.path.isfile(os.path.join(path,'bin','iclvars.bat')):
try:
targets.append((target,(arch,conf.get_msvc_version('intel',version,target,os.path.join(path,'bin','iclvars.bat')))))
except Configure.ConfigurationError:
pass
except WindowsError:
continue
major=version[0:2]
versions.append(('intel '+major,targets))
def get_msvc_versions(conf):
if not conf.env.MSVC_INSTALLED_VERSIONS:
lst=[]
conf.gather_msvc_versions(lst)
conf.gather_wsdk_versions(lst)
conf.gather_icl_versions(lst)
conf.env.MSVC_INSTALLED_VERSIONS=lst
return conf.env.MSVC_INSTALLED_VERSIONS
def print_all_msvc_detected(conf):
for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']:
info(version)
for target,l in targets:
info("\t"+target)
def detect_msvc(conf):
versions=get_msvc_versions(conf)
return setup_msvc(conf,versions)
def find_lt_names_msvc(self,libname,is_static=False):
lt_names=['lib%s.la'%libname,'%s.la'%libname,]
for path in self.env['LIBPATH']:
for la in lt_names:
laf=os.path.join(path,la)
dll=None
if os.path.exists(laf):
ltdict=read_la_file(laf)
lt_libdir=None
if ltdict.get('libdir',''):
lt_libdir=ltdict['libdir']
if not is_static and ltdict.get('library_names',''):
dllnames=ltdict['library_names'].split()
dll=dllnames[0].lower()
dll=re.sub('\.dll$','',dll)
return(lt_libdir,dll,False)
elif ltdict.get('old_library',''):
olib=ltdict['old_library']
if os.path.exists(os.path.join(path,olib)):
return(path,olib,True)
elif lt_libdir!=''and os.path.exists(os.path.join(lt_libdir,olib)):
return(lt_libdir,olib,True)
else:
return(None,olib,True)
else:
raise Utils.WafError('invalid libtool object file: %s'%laf)
return(None,None,None)
def libname_msvc(self,libname,is_static=False,mandatory=False):
lib=libname.lower()
lib=re.sub('\.lib$','',lib)
if lib in g_msvc_systemlibs:
return lib
lib=re.sub('^lib','',lib)
if lib=='m':
return None
(lt_path,lt_libname,lt_static)=self.find_lt_names_msvc(lib,is_static)
if lt_path!=None and lt_libname!=None:
if lt_static==True:
return os.path.join(lt_path,lt_libname)
if lt_path!=None:
_libpaths=[lt_path]+self.env['LIBPATH']
else:
_libpaths=self.env['LIBPATH']
static_libs=['lib%ss.lib'%lib,'lib%s.lib'%lib,'%ss.lib'%lib,'%s.lib'%lib,]
dynamic_libs=['lib%s.dll.lib'%lib,'lib%s.dll.a'%lib,'%s.dll.lib'%lib,'%s.dll.a'%lib,'lib%s_d.lib'%lib,'%s_d.lib'%lib,'%s.lib'%lib,]
libnames=static_libs
if not is_static:
libnames=dynamic_libs+static_libs
for path in _libpaths:
for libn in libnames:
if os.path.exists(os.path.join(path,libn)):
debug('msvc: lib found: %s',os.path.join(path,libn))
return re.sub('\.lib$','',libn)
if mandatory:
self.fatal("The library %r could not be found"%libname)
return re.sub('\.lib$','',libname)
def check_lib_msvc(self,libname,is_static=False,uselib_store=None,mandatory=False):
libn=self.libname_msvc(libname,is_static,mandatory)
if not uselib_store:
uselib_store=libname.upper()
if False and is_static:
self.env['STATICLIB_'+uselib_store]=[libn]
else:
self.env['LIB_'+uselib_store]=[libn]
def check_libs_msvc(self,libnames,is_static=False,mandatory=False):
for libname in Utils.to_list(libnames):
self.check_lib_msvc(libname,is_static,mandatory=mandatory)
def no_autodetect(conf):
conf.eval_rules(detect.replace('autodetect',''))
detect='''
autodetect
find_msvc
msvc_common_flags
cc_load_tools
cxx_load_tools
cc_add_flags
cxx_add_flags
link_add_flags
'''
def autodetect(conf):
v=conf.env
compiler,version,path,includes,libdirs=detect_msvc(conf)
v['PATH']=path
v['CPPPATH']=includes
v['LIBPATH']=libdirs
v['MSVC_COMPILER']=compiler
def _get_prog_names(conf,compiler):
if compiler=='intel':
compiler_name='ICL'
linker_name='XILINK'
lib_name='XILIB'
else:
compiler_name='CL'
linker_name='LINK'
lib_name='LIB'
return compiler_name,linker_name,lib_name
def find_msvc(conf):
if sys.platform!='win32':
conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet')
v=conf.env
compiler,version,path,includes,libdirs=detect_msvc(conf)
compiler_name,linker_name,lib_name=_get_prog_names(conf,compiler)
has_msvc_manifest=(compiler=='msvc'and float(version)>=8)or(compiler=='wsdk'and float(version)>=6)or(compiler=='intel'and float(version)>=11)
cxx=None
if v.CXX:cxx=v.CXX
elif'CXX'in conf.environ:cxx=conf.environ['CXX']
if not cxx:cxx=conf.find_program(compiler_name,var='CXX',path_list=path,mandatory=True)
cxx=conf.cmd_to_list(cxx)
env=dict(conf.environ)
env.update(PATH=';'.join(path))
if not Utils.cmd_output([cxx,'/nologo','/?'],silent=True,env=env):
conf.fatal('the msvc compiler could not be identified')
link=v.LINK_CXX
if not link:
link=conf.find_program(linker_name,path_list=path,mandatory=True)
ar=v.AR
if not ar:
ar=conf.find_program(lib_name,path_list=path,mandatory=True)
mt=v.MT
if has_msvc_manifest:
mt=conf.find_program('MT',path_list=path,mandatory=True)
v.MSVC_MANIFEST=has_msvc_manifest
v.PATH=path
v.CPPPATH=includes
v.LIBPATH=libdirs
v.CC=v.CXX=cxx
v.CC_NAME=v.CXX_NAME='msvc'
v.LINK=v.LINK_CXX=link
if not v.LINK_CC:
v.LINK_CC=v.LINK_CXX
v.AR=ar
v.MT=mt
v.MTFLAGS=v.ARFLAGS=['/NOLOGO']
conf.check_tool('winres')
if not conf.env.WINRC:
warn('Resource compiler not found. Compiling resource file is disabled')
try:v.prepend_value('CPPPATH',conf.environ['INCLUDE'])
except KeyError:pass
try:v.prepend_value('LIBPATH',conf.environ['LIB'])
except KeyError:pass
def msvc_common_flags(conf):
v=conf.env
v['CPPFLAGS']=['/W3','/nologo']
v['CCDEFINES_ST']='/D%s'
v['CXXDEFINES_ST']='/D%s'
v['CCDEFINES']=['WIN32']
v['CXXDEFINES']=['WIN32']
v['_CCINCFLAGS']=[]
v['_CCDEFFLAGS']=[]
v['_CXXINCFLAGS']=[]
v['_CXXDEFFLAGS']=[]
v['CC_SRC_F']=''
v['CC_TGT_F']=['/c','/Fo']
v['CXX_SRC_F']=''
v['CXX_TGT_F']=['/c','/Fo']
v['CPPPATH_ST']='/I%s'
v['AR_TGT_F']=v['CCLNK_TGT_F']=v['CXXLNK_TGT_F']='/OUT:'
v['CPPFLAGS_CONSOLE']=['/SUBSYSTEM:CONSOLE']
v['CPPFLAGS_NATIVE']=['/SUBSYSTEM:NATIVE']
v['CPPFLAGS_POSIX']=['/SUBSYSTEM:POSIX']
v['CPPFLAGS_WINDOWS']=['/SUBSYSTEM:WINDOWS']
v['CPPFLAGS_WINDOWSCE']=['/SUBSYSTEM:WINDOWSCE']
v['CPPFLAGS_CRT_MULTITHREADED']=['/MT']
v['CPPFLAGS_CRT_MULTITHREADED_DLL']=['/MD']
v['CPPDEFINES_CRT_MULTITHREADED']=['_MT']
v['CPPDEFINES_CRT_MULTITHREADED_DLL']=['_MT','_DLL']
v['CPPFLAGS_CRT_MULTITHREADED_DBG']=['/MTd']
v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG']=['/MDd']
v['CPPDEFINES_CRT_MULTITHREADED_DBG']=['_DEBUG','_MT']
v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG']=['_DEBUG','_MT','_DLL']
v['CCFLAGS']=['/TC']
v['CCFLAGS_OPTIMIZED']=['/O2','/DNDEBUG']
v['CCFLAGS_RELEASE']=['/O2','/DNDEBUG']
v['CCFLAGS_DEBUG']=['/Od','/RTC1','/ZI']
v['CCFLAGS_ULTRADEBUG']=['/Od','/RTC1','/ZI']
v['CXXFLAGS']=['/TP','/EHsc']
v['CXXFLAGS_OPTIMIZED']=['/O2','/DNDEBUG']
v['CXXFLAGS_RELEASE']=['/O2','/DNDEBUG']
v['CXXFLAGS_DEBUG']=['/Od','/RTC1','/ZI']
v['CXXFLAGS_ULTRADEBUG']=['/Od','/RTC1','/ZI']
v['LIB']=[]
v['LIB_ST']='%s.lib'
v['LIBPATH_ST']='/LIBPATH:%s'
v['STATICLIB_ST']='lib%s.lib'
v['STATICLIBPATH_ST']='/LIBPATH:%s'
v['LINKFLAGS']=['/NOLOGO','/MANIFEST']
v['LINKFLAGS_DEBUG']=['/DEBUG']
v['LINKFLAGS_ULTRADEBUG']=['/DEBUG']
v['shlib_CCFLAGS']=['']
v['shlib_CXXFLAGS']=['']
v['shlib_LINKFLAGS']=['/DLL']
v['shlib_PATTERN']='%s.dll'
v['implib_PATTERN']='%s.lib'
v['IMPLIB_ST']='/IMPLIB:%s'
v['staticlib_LINKFLAGS']=['']
v['staticlib_PATTERN']='lib%s.lib'
v['program_PATTERN']='%s.exe'
def apply_flags_msvc(self):
if self.env.CC_NAME!='msvc'or not self.link_task:
return
subsystem=getattr(self,'subsystem','')
if subsystem:
subsystem='/subsystem:%s'%subsystem
flags='cstaticlib'in self.features and'ARFLAGS'or'LINKFLAGS'
self.env.append_value(flags,subsystem)
if getattr(self,'link_task',None)and not'cstaticlib'in self.features:
for f in self.env.LINKFLAGS:
d=f.lower()
if d[1:]=='debug':
pdbnode=self.link_task.outputs[0].change_ext('.pdb')
pdbfile=pdbnode.bldpath(self.env)
self.link_task.outputs.append(pdbnode)
self.bld.install_files(self.install_path,[pdbnode],env=self.env)
break
def apply_obj_vars_msvc(self):
if self.env['CC_NAME']!='msvc':
return
try:
self.meths.remove('apply_obj_vars')
except ValueError:
pass
libpaths=getattr(self,'libpaths',[])
if not libpaths:self.libpaths=libpaths
env=self.env
app=env.append_unique
cpppath_st=env['CPPPATH_ST']
lib_st=env['LIB_ST']
staticlib_st=env['STATICLIB_ST']
libpath_st=env['LIBPATH_ST']
staticlibpath_st=env['STATICLIBPATH_ST']
for i in env['LIBPATH']:
app('LINKFLAGS',libpath_st%i)
if not libpaths.count(i):
libpaths.append(i)
for i in env['LIBPATH']:
app('LINKFLAGS',staticlibpath_st%i)
if not libpaths.count(i):
libpaths.append(i)
if not env['FULLSTATIC']:
if env['STATICLIB']or env['LIB']:
app('LINKFLAGS',env['SHLIB_MARKER'])
for i in env['STATICLIB']:
app('LINKFLAGS',staticlib_st%i)
for i in env['LIB']:
app('LINKFLAGS',lib_st%i)
def apply_manifest(self):
if self.env.CC_NAME=='msvc'and self.env.MSVC_MANIFEST:
out_node=self.link_task.outputs[0]
man_node=out_node.parent.find_or_declare(out_node.name+'.manifest')
self.link_task.outputs.append(man_node)
self.link_task.do_manifest=True
def exec_mf(self):
env=self.env
mtool=env['MT']
if not mtool:
return 0
self.do_manifest=False
outfile=self.outputs[0].bldpath(env)
manifest=None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest=out_node.bldpath(env)
break
if manifest is None:
return 0
mode=''
if'cprogram'in self.generator.features:
mode='1'
elif'cshlib'in self.generator.features:
mode='2'
debug('msvc: embedding manifest')
lst=[]
lst.extend([env['MT']])
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(Utils.to_list("-manifest"))
lst.extend(Utils.to_list(manifest))
lst.extend(Utils.to_list("-outputresource:%s;%s"%(outfile,mode)))
lst=[lst]
return self.exec_command(*lst)
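# Illustrative sketch (not part of the original module): for a 'cprogram' task
# generator producing foo.exe, with the default MTFLAGS of ['/NOLOGO'] set above,
# exec_mf ends up invoking roughly:
#   MT /NOLOGO -manifest foo.exe.manifest -outputresource:foo.exe;1
# (mode '2' would be used instead for a 'cshlib' target).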
def exec_command_msvc(self,*k,**kw):
if self.env['CC_NAME']=='msvc':
if isinstance(k[0],list):
lst=[]
carry=''
for a in k[0]:
if len(a)==3 and a.startswith('/F')or a=='/doc'or a[-1]==':':
carry=a
else:
lst.append(carry+a)
carry=''
k=[lst]
env=dict(os.environ)
env.update(PATH=';'.join(self.env['PATH']))
kw['env']=env
ret=self.generator.bld.exec_command(*k,**kw)
if ret:return ret
if getattr(self,'do_manifest',None):
ret=exec_mf(self)
return ret
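# Illustrative sketch (not part of the original module): the carry loop above
# glues argument-taking flags onto the value that follows them, so a command
# list such as ['CL.exe', '/c', '/Fo', 'build\\main.obj', 'main.c'] (the '/c'
# and '/Fo' pieces come from CC_TGT_F) is rewritten to
# ['CL.exe', '/c', '/Fobuild\\main.obj', 'main.c'] before being executed.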
for k in'cc cxx winrc cc_link cxx_link static_link qxx'.split():
cls=Task.TaskBase.classes.get(k,None)
if cls:
cls.exec_command=exec_command_msvc
conf(get_msvc_version)
conf(gather_wsdk_versions)
conf(gather_msvc_versions)
conf(gather_icl_versions)
conf(get_msvc_versions)
conf(print_all_msvc_detected)
conf(find_lt_names_msvc)
conf(libname_msvc)
conf(check_lib_msvc)
conf(check_libs_msvc)
conftest(no_autodetect)
conftest(autodetect)
conftest(find_msvc)
conftest(msvc_common_flags)
after('apply_link')(apply_flags_msvc)
feature('cc','cxx')(apply_flags_msvc)
feature('cprogram','cshlib','cstaticlib')(apply_obj_vars_msvc)
after('apply_lib_vars')(apply_obj_vars_msvc)
before('apply_obj_vars')(apply_obj_vars_msvc)
feature('cprogram','cshlib')(apply_manifest)
after('apply_link')(apply_manifest)
| gpl-2.0 |
jcasner/nupic | src/nupic/data/record_stream.py | 39 | 15221 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Interface for different types of storages (file, hbase, rio, etc)."""
from abc import ABCMeta, abstractmethod
import datetime
from nupic.data.fieldmeta import FieldMetaSpecial
def _getFieldIndexBySpecial(fields, special):
""" Return index of the field matching the field meta special value.
:param fields: sequence of nupic.data.fieldmeta.FieldMetaInfo objects
representing the fields of a stream
:param special: one of the special field attribute values from
nupic.data.fieldmeta.FieldMetaSpecial
:returns: first zero-based index of the field tagged with the target field
meta special attribute; None if no such field
"""
for i, field in enumerate(fields):
if field.special == special:
return i
return None
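# Illustrative example (not part of the original module): if the second entry
# in `fields` is the one tagged with FieldMetaSpecial.timestamp, then
# _getFieldIndexBySpecial(fields, FieldMetaSpecial.timestamp) returns 1;
# asking for a special attribute that no field carries returns None.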
class ModelRecordEncoder(object):
"""Encodes metric data input rows for consumption by OPF models. See
the `ModelRecordEncoder.encode` method for more details.
"""
def __init__(self, fields, aggregationPeriod=None):
"""
:param fields: non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo
objects corresponding to fields in input rows.
:param dict aggregationPeriod: aggregation period of the record stream as a
dict containing 'months' and 'seconds'. The months is always an integer
and seconds is a floating point. Only one is allowed to be non-zero at a
time. If there is no aggregation associated with the stream, pass None.
Typically, a raw file or hbase stream will NOT have any aggregation info,
but subclasses of RecordStreamIface, like StreamReader, will and will
provide the aggregation period. This is used by the encode method to
assign a record number to a record given its timestamp and the aggregation
interval.
"""
if not fields:
raise ValueError('fields arg must be non-empty, but got %r' % (fields,))
self._fields = fields
self._aggregationPeriod = aggregationPeriod
self._sequenceId = -1
self._fieldNames = tuple(f.name for f in fields)
self._categoryFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.category)
self._resetFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.reset)
self._sequenceFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.sequence)
self._timestampFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.timestamp)
self._learningFieldIndex = _getFieldIndexBySpecial(
fields,
FieldMetaSpecial.learning)
def rewind(self):
"""Put us back at the beginning of the file again """
self._sequenceId = -1
def encode(self, inputRow):
"""Encodes the given input row as a dict, with the
keys being the field names. This also adds in some meta fields:
'_category': The value from the category field (if any)
'_reset': True if the reset field was True (if any)
'_sequenceId': the value from the sequenceId field (if any)
:param inputRow: sequence of values corresponding to a single input metric
data row
:rtype: dict
"""
# Create the return dict
result = dict(zip(self._fieldNames, inputRow))
# Add in the special fields
if self._categoryFieldIndex is not None:
# category value can be an int or a list
if isinstance(inputRow[self._categoryFieldIndex], int):
result['_category'] = [inputRow[self._categoryFieldIndex]]
else:
result['_category'] = (inputRow[self._categoryFieldIndex]
if inputRow[self._categoryFieldIndex]
else [None])
else:
result['_category'] = [None]
if self._resetFieldIndex is not None:
result['_reset'] = int(bool(inputRow[self._resetFieldIndex]))
else:
result['_reset'] = 0
if self._learningFieldIndex is not None:
result['_learning'] = int(bool(inputRow[self._learningFieldIndex]))
result['_timestampRecordIdx'] = None
if self._timestampFieldIndex is not None:
result['_timestamp'] = inputRow[self._timestampFieldIndex]
# Compute the record index based on timestamp
result['_timestampRecordIdx'] = self._computeTimestampRecordIdx(
inputRow[self._timestampFieldIndex])
else:
result['_timestamp'] = None
# -----------------------------------------------------------------------
# Figure out the sequence ID
hasReset = self._resetFieldIndex is not None
hasSequenceId = self._sequenceFieldIndex is not None
if hasReset and not hasSequenceId:
# Reset only
if result['_reset']:
self._sequenceId += 1
sequenceId = self._sequenceId
elif not hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
result['_reset'] = int(sequenceId != self._sequenceId)
self._sequenceId = sequenceId
elif hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
else:
sequenceId = 0
if sequenceId is not None:
result['_sequenceId'] = hash(sequenceId)
else:
result['_sequenceId'] = None
return result
def _computeTimestampRecordIdx(self, recordTS):
""" Give the timestamp of a record (a datetime object), compute the record's
timestamp index - this is the timestamp divided by the aggregation period.
Parameters:
------------------------------------------------------------------------
recordTS: datetime instance
retval: record timestamp index, or None if no aggregation period
"""
if self._aggregationPeriod is None:
return None
# Base record index on number of elapsed months if aggregation is in
# months
if self._aggregationPeriod['months'] > 0:
assert self._aggregationPeriod['seconds'] == 0
result = int(
(recordTS.year * 12 + (recordTS.month-1)) /
self._aggregationPeriod['months'])
# Base record index on elapsed seconds
elif self._aggregationPeriod['seconds'] > 0:
delta = recordTS - datetime.datetime(year=1, month=1, day=1)
deltaSecs = delta.days * 24 * 60 * 60 \
+ delta.seconds \
+ delta.microseconds / 1000000.0
result = int(deltaSecs / self._aggregationPeriod['seconds'])
else:
result = None
return result
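# Minimal usage sketch (illustrative only; the field list and values below are
# made-up examples, not part of the original module). Assuming `fields`
# describes a (timestamp, consumption) stream in that order, the encoder turns
# a raw row into a dict keyed by field name plus the _category/_reset/
# _sequenceId/_timestamp meta fields documented in encode():
#
#   encoder = ModelRecordEncoder(
#       fields=fields,
#       aggregationPeriod={'months': 0, 'seconds': 3600.0})
#   row = encoder.encode([datetime.datetime(2014, 1, 1), 21.0])
#   # row['_timestampRecordIdx'] is the elapsed seconds divided by the 3600 s
#   # aggregation period, as computed by _computeTimestampRecordIdx above.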
class RecordStreamIface(object):
"""This is the interface for the record input/output storage classes."""
__metaclass__ = ABCMeta
def __init__(self):
# Will be initialized on-demand in getNextRecordDict with a
# ModelRecordEncoder instance, once encoding metadata is available
self._modelRecordEncoder = None
@abstractmethod
def close(self):
""" Close the stream
"""
def rewind(self):
"""Put us back at the beginning of the file again) """
if self._modelRecordEncoder is not None:
self._modelRecordEncoder.rewind()
@abstractmethod
def getNextRecord(self, useCache=True):
"""Returns next available data record from the storage. If useCache is
False, then don't read ahead and don't cache any records.
Raises nupic.support.exceptions.StreamDisappearedError if stream
disappears (e.g., gets garbage-collected).
retval: a data row (a list or tuple) if available; None, if no more records
in the table (End of Stream - EOS); empty sequence (list or tuple)
when timing out while waiting for the next record.
"""
def getNextRecordDict(self):
"""Returns next available data record from the storage as a dict, with the
keys being the field names. This also adds in some meta fields:
'_category': The value from the category field (if any)
'_reset': True if the reset field was True (if any)
'_sequenceId': the value from the sequenceId field (if any)
"""
values = self.getNextRecord()
if values is None:
return None
if not values:
return dict()
if self._modelRecordEncoder is None:
self._modelRecordEncoder = ModelRecordEncoder(
fields=self.getFields(),
aggregationPeriod=self.getAggregationMonthsAndSeconds())
return self._modelRecordEncoder.encode(values)
def getAggregationMonthsAndSeconds(self):
""" Returns the aggregation period of the record stream as a dict
containing 'months' and 'seconds'. The months is always an integer and
seconds is a floating point. Only one is allowed to be non-zero.
If there is no aggregation associated with the stream, returns None.
Typically, a raw file or hbase stream will NOT have any aggregation info,
    but subclasses of RecordStreamIface, like StreamReader, will and will
return the aggregation period from this call. This call is used by the
getNextRecordDict() method to assign a record number to a record given
its timestamp and the aggregation interval
Parameters:
------------------------------------------------------------------------
retval: aggregationPeriod (as a dict) or None
'months': number of months in aggregation period
'seconds': number of seconds in aggregation period (as a float)
"""
return None
@abstractmethod
def getRecordsRange(self, bookmark=None, range=None):
"""Returns a range of records, starting from the bookmark. If 'bookmark'
    is None, then records are read from the first available. If 'range' is
None, all available records will be returned (caution: this could be
a lot of records and require a lot of memory).
"""
@abstractmethod
def getNextRecordIdx(self):
"""Returns the index of the record that will be read next from
getNextRecord()
"""
@abstractmethod
def getLastRecords(self, numRecords):
"""Returns a tuple (successCode, recordsArray), where
successCode - if the stream had enough records to return, True/False
recordsArray - an array of last numRecords records available when
the call was made. Records appended while in the
                      getLastRecords will not be returned until the next
call to either getNextRecord() or getLastRecords()
"""
@abstractmethod
def removeOldData(self):
"""Deletes all rows from the table if any data was found."""
@abstractmethod
def appendRecord(self, record, inputRef=None):
"""Saves the record in the underlying storage."""
@abstractmethod
def appendRecords(self, records, inputRef=None, progressCB=None):
"""Saves multiple records in the underlying storage."""
@abstractmethod
def getBookmark(self):
"""Returns an anchor to the current position in the data. Passing this
anchor to the constructor makes the current position to be the first
returned record. If record is no longer in the storage, the first available
after it will be returned.
"""
@abstractmethod
def recordsExistAfter(self, bookmark):
"""Returns True iff there are records left after the bookmark."""
@abstractmethod
def seekFromEnd(self, numRecords):
"""Returns a bookmark numRecords from the end of the stream."""
@abstractmethod
def getStats(self):
"""Returns storage stats (like min and max values of the fields)."""
def getFieldMin(self, fieldName):
""" Returns current minimum value for the field 'fieldName'.
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
"""
stats = self.getStats()
    if stats is None:
return None
minValues = stats.get('min', None)
    if minValues is None:
return None
index = self.getFieldNames().index(fieldName)
return minValues[index]
def getFieldMax(self, fieldName):
""" Returns current maximum value for the field 'fieldName'.
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
"""
stats = self.getStats()
    if stats is None:
return None
maxValues = stats.get('max', None)
    if maxValues is None:
return None
index = self.getFieldNames().index(fieldName)
return maxValues[index]
@abstractmethod
def clearStats(self):
"""Resets stats collected so far."""
@abstractmethod
def getError(self):
"""Returns errors saved in the storage."""
@abstractmethod
def setError(self, error):
"""Saves specified error in the storage."""
@abstractmethod
def isCompleted(self):
"""Returns True if all records are already in the storage or False
    if more records are expected.
"""
@abstractmethod
def setCompleted(self, completed):
"""Marks the stream completed (True or False)."""
@abstractmethod
def getFieldNames(self):
"""Returns an array of field names associated with the data."""
@abstractmethod
def getFields(self):
"""Returns a sequence of nupic.data.fieldmeta.FieldMetaInfo
name/type/special tuples for each field in the stream. Might be None, if
that information is provided externally (thru stream def, for example).
"""
def getResetFieldIdx(self):
"""
:returns: index of the 'reset' field; None if no such field. """
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.reset)
def getTimestampFieldIdx(self):
""" Return index of the 'timestamp' field. """
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.timestamp)
def getSequenceIdFieldIdx(self):
""" Return index of the 'sequenceId' field. """
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.sequence)
def getCategoryFieldIdx(self):
""" Return index of the 'category' field. """
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.category)
def getLearningFieldIdx(self):
""" Return index of the 'learning' field. """
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.learning)
@abstractmethod
def setTimeout(self, timeout):
""" Set the read timeout in seconds (int or floating point) """
@abstractmethod
def flush(self):
""" Flush the file to disk """
| agpl-3.0 |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/tests/test_install_scripts.py | 152 | 2555 | """Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
class InstallScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(
build_scripts="/foo/bar")
dist.command_obj["install"] = support.DummyCommand(
install_scripts="/splat/funk",
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
self.assert_(not cmd.force)
self.assert_(not cmd.skip_build)
self.assert_(cmd.build_dir is None)
self.assert_(cmd.install_dir is None)
cmd.finalize_options()
self.assert_(cmd.force)
self.assert_(cmd.skip_build)
self.assertEqual(cmd.build_dir, "/foo/bar")
self.assertEqual(cmd.install_dir, "/splat/funk")
def test_installation(self):
source = self.mkdtemp()
expected = []
def write_script(name, text):
expected.append(name)
f = open(os.path.join(source, name), "w")
f.write(text)
f.close()
write_script("script1.py", ("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("script2.py", ("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("shell.sh", ("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
target = self.mkdtemp()
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
dist.command_obj["install"] = support.DummyCommand(
install_scripts=target,
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
cmd.finalize_options()
cmd.run()
installed = os.listdir(target)
for name in expected:
self.assert_(name in installed)
def test_suite():
return unittest.makeSuite(InstallScriptsTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| apache-2.0 |
Shelo/cmdoc | section/views.py | 1 | 4266 | from time import strptime
from django.contrib.auth.decorators import login_required
from django.db.models import F
from django.http import JsonResponse
from django.shortcuts import redirect, get_object_or_404
from edit import decorators
from section import forms, utils, models
import edit.models
import json
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def create(request, document_id):
section_form = forms.SectionForm(request.POST)
if section_form.is_valid():
section = section_form.save(commit=False)
section.owner = request.user
section.document_id = document_id
section.modifier = request.user
if section.position == -1:
last = models.Section.objects.filter(
document_id=section.document_id
).last()
if last is None:
section.position = 0
else:
section.position = last.position + 1
models.Section.objects.filter(
document_id=section.document_id,
position__gte=section.position
).update(position=F('position') + 1)
section.save()
return redirect('edit:index', document_id=document_id)
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def remove(request, document_id, section_id):
section = get_object_or_404(models.Section, id=section_id)
position = section.position
models.Section.objects.filter(document_id=section.document_id,
position__gte=position + 1).update(position=F('position') - 1)
section.delete()
return redirect('edit:index', document_id=document_id)
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def update(request, document_id, section_id):
section = get_object_or_404(models.Section, id=section_id)
section.content = request.POST.get('content')
section.modifier = request.user
section.message = request.POST.get('message')
section.save()
utils.release_section(section_id)
return redirect('edit:index', document_id=document_id)
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def acquire(request, document_id):
section_id = request.POST.get('sectionId')
section = get_object_or_404(models.Section, id=section_id)
response = False
# allow editing if no one is editing or you are editing (just for bugs though...)
if section.editing is None or section.editing.username == request.user.username:
response = True
section.editing = request.user
section.save_no_notification()
return JsonResponse({
'status': response,
'current': section.editing.username,
'content': section.content,
})
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def release(request, document_id):
section_id = request.POST.get('sectionId')
utils.release_section(section_id)
return JsonResponse({
'status': True
})
@decorators.belongs_to_document
@login_required(login_url='dashboard:login')
def check_status(request, document_id):
sections_data = json.loads(request.POST.get('sections_data'))
serial = int(request.POST.get('serial'))
document = get_object_or_404(edit.models.Document, id=document_id)
response = False
result = []
    # this just won't work well, since it adds a lot of overhead.
    # one possible optimization is to compare the latest modification time of the
    # document and check only if it was modified recently.
if document.serial > serial:
response = True
for data in sections_data:
section_id = int(data['id'])
section_serial = int(data['serial'])
section = models.Section.objects.get(id=section_id)
if section.serial > section_serial:
result.append({
'content': section.content,
'serial': section.serial,
'modifier': section.modifier,
'message': section.message
})
return JsonResponse({
'status': response,
'serial': document.serial,
'result': result
})
| mit |
google/fhir-examples | py/google/fhir_examples/validation_example.py | 1 | 2786 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for using the resource_validation API to validate FhirProtos.
To run using bazel:
```
bazel build //py/google/fhir_examples:validation_example
bazel-bin/py/google/fhir_examples/validation_example $WORKSPACE
```
To run using python+pip:
```
python3 -m venv venv
source venv/bin/activate
pip install google-fhir
python3 validation_example.py $WORKSPACE
```
Where $WORKSPACE is the location of a synthea dataset. For instructions on
setting up your workspace, see the top-level README.md.
"""
import random
from absl import app
from proto.google.fhir.proto.r4.core.resources import patient_pb2
from google.fhir import fhir_errors
from google.fhir.r4 import json_format
from google.fhir.r4 import resource_validation
def main(argv):
if len(argv) != 2:
raise app.UsageError('Should have exactly one argument for workspace')
workspace = argv[1]
with open(
workspace + '/ndjson/Patient.fhir.ndjson', 'r', encoding='utf-8') as f:
patients = []
# Read first 200 patient records.
for i in range(0, 200):
ndjson_line = f.readline()
try:
patient_proto = json_format.json_fhir_string_to_proto(
ndjson_line, patient_pb2.Patient)
patients.append(patient_proto)
except ValueError as e:
print(f'Failed parsing record {i}: {e}')
for i, patient in enumerate(patients):
# Insert a validation error into 5% of the records.
# Patient.communication is an optional BackboneElement field, but if one
# is added, it *MUST* contain a populated Patient.communication.language
# field.
# See: http://hl7.org/fhir/patient.html
communication = patient.communication.add()
communication.preferred.value = True
# Add the required language field to 95% of records
if random.random() < .95:
language = communication.language.coding.add()
language.system.value = 'http://hl7.org/fhir/valueset-all-languages.html'
language.code.value = 'fr'
try:
resource_validation.validate_resource(patient)
except fhir_errors.InvalidFhirError as e:
print(f'Failed validating record {i}: {e}')
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
josephkirk/PipelineTools | sample/qt_ui.py | 1 | 3109 | import maya.cmds as cm
import maya.mel as mm
import pymel.core as pm
try:
from PySide2 import QtWidgets, QtCore, QtGui
except ImportError:
from PySide import QtCore, QtGui
QtWidgets = QtGui
# Get Maya Main Window
mayaMainWindow = pm.ui.Window('MayaWindow').asQtObject()
# Wrapper
SIGNAL = QtCore.Signal
# UIClass
class JetTool(QtWidgets.QMainWindow):
def __init__(self):
super(JetTool, self).__init__()
try:
pm.deleteUI('PipelineToolsWindow')
except:
pass
# mayaMainWindow = {o.objectName(): o for o in QtWidgets.qApp.topLevelWidgets()}["MayaWindow"]
self.setParent(mayaMainWindow)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle('Pipeline Tools')
self.setObjectName('PipelineToolsWindow')
self.setMinimumSize(250,100)
self._makeUI()
# def onclick(self):
# text_combo = [self.textbox.text(),self.textbox2.text()]
# self.convertClicked.emit(text_combo)
# print text_combo
def _makeUI(self):
#Generate Widget
self.container = QtWidgets.QWidget(self)
self.container2 = QtWidgets.QWidget(self)
self.layout = QtWidgets.QGridLayout(self.container2)
#Interaction
# self.button.clicked.connect(self.onclick)
#Layout Widget
self.layout = QtWidgets.QGridLayout(self.container)
self.container.setLayout(self.layout)
self.layout.setContentsMargins(5, 15, 5, 5)
self.layout.setHorizontalSpacing(1)
self.layout.setVerticalSpacing(2)
for i in range(5):
self.layout.setColumnMinimumWidth(i,15)
if not i%2:
self.layout.setColumnStretch(i,i)
groupbox = QtWidgets.QGroupBox('testGp')
label = QtWidgets.QLabel('Edit:',self.container)
labeltextbox = QtWidgets.QLineEdit(self.container)
labelbutton = QtWidgets.QPushButton('OK', self.container)
subLayout = QtWidgets.QHBoxLayout(self.container)
subLayout.addWidget(label)
subLayout.addWidget(labeltextbox)
subLayout.addWidget(labelbutton)
groupbox.setLayout(subLayout)
groupbox2 = QtWidgets.QGroupBox('testGp2')
textbox = QtWidgets.QLineEdit(self.container)
textbox2 = QtWidgets.QLineEdit(self.container)
button = QtWidgets.QPushButton('OK', self.container)
subLayout2 = QtWidgets.QVBoxLayout(self.container)
subLayout2.addWidget(textbox)
subLayout2.addWidget(textbox2)
subLayout2.addWidget(button)
groupbox2.setLayout(subLayout2)
self.layout.addWidget(groupbox,0,i)
self.layout.addWidget(groupbox2,1,i)
self.tabWid = QtWidgets.QTabWidget(self)
self.tabWid.addTab(self.container,'test')
self.tabWid.addTab(self.container2,'test2')
self.setCentralWidget(self.tabWid)
| bsd-2-clause |
BeDjango/intef-openedx | lms/djangoapps/lti_provider/tests/test_outcomes.py | 48 | 18771 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import unittest
from django.test import TestCase
from lxml import etree
from mock import patch, MagicMock, ANY
import requests_oauthlib
import requests
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.outcomes as outcomes
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory, check_mongo_calls
class StoreOutcomeParametersTest(TestCase):
"""
Tests for the store_outcome_parameters method in outcomes.py
"""
def setUp(self):
super(StoreOutcomeParametersTest, self).setUp()
self.user = UserFactory.create()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.consumer = LtiConsumer(
consumer_name='consumer',
consumer_key='consumer_key',
consumer_secret='secret'
)
self.consumer.save()
def get_valid_request_params(self):
"""
Returns a dictionary containing a complete set of required LTI
parameters.
"""
return {
'lis_result_sourcedid': 'sourcedid',
'lis_outcome_service_url': 'http://example.com/service_url',
'oauth_consumer_key': 'consumer_key',
'tool_consumer_instance_guid': 'tool_instance_guid',
'usage_key': self.usage_key,
'course_key': self.course_key,
}
def test_graded_assignment_created(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
assignment = GradedAssignment.objects.get(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(assignment.course_key, self.course_key)
self.assertEqual(assignment.usage_key, self.usage_key)
self.assertEqual(assignment.user, self.user)
def test_outcome_service_created(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome = OutcomeService.objects.get(
lti_consumer=self.consumer
)
self.assertEqual(outcome.lti_consumer, self.consumer)
def test_graded_assignment_references_outcome_service(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome = OutcomeService.objects.get(
lti_consumer=self.consumer
)
assignment = GradedAssignment.objects.get(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(assignment.outcome_service, outcome)
def test_no_duplicate_graded_assignments(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
with self.assertNumQueries(2):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
assignments = GradedAssignment.objects.filter(
lis_result_sourcedid=params['lis_result_sourcedid']
)
self.assertEqual(len(assignments), 1)
def test_no_duplicate_outcome_services(self):
params = self.get_valid_request_params()
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
with self.assertNumQueries(2):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
outcome_services = OutcomeService.objects.filter(
lti_consumer=self.consumer
)
self.assertEqual(len(outcome_services), 1)
def test_no_db_update_for_ungraded_assignment(self):
params = self.get_valid_request_params()
del params['lis_result_sourcedid']
with self.assertNumQueries(0):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
def test_no_db_update_for_bad_request(self):
params = self.get_valid_request_params()
del params['lis_outcome_service_url']
with self.assertNumQueries(0):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
def test_db_record_created_without_consumer_id(self):
params = self.get_valid_request_params()
del params['tool_consumer_instance_guid']
with self.assertNumQueries(8):
outcomes.store_outcome_parameters(params, self.user, self.consumer)
self.assertEqual(GradedAssignment.objects.count(), 1)
self.assertEqual(OutcomeService.objects.count(), 1)
class SignAndSendReplaceResultTest(TestCase):
"""
Tests for the sign_and_send_replace_result method in outcomes.py
"""
def setUp(self):
super(SignAndSendReplaceResultTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
consumer = LtiConsumer(
consumer_name='consumer',
consumer_key='consumer_key',
consumer_secret='secret'
)
consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=consumer,
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
)
self.assignment.save()
@patch('requests.post', return_value='response')
def test_sign_and_send_replace_result(self, post_mock):
response = outcomes.sign_and_send_replace_result(self.assignment, 'xml')
post_mock.assert_called_with(
'http://example.com/service_url',
data='xml',
auth=ANY,
headers={'content-type': 'application/xml'}
)
self.assertEqual(response, 'response')
class XmlHandlingTest(TestCase):
"""
Tests for the generate_replace_result_xml and check_replace_result_response
methods in outcomes.py
"""
response_xml = """
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>4560</imsx_messageIdentifier>
<imsx_statusInfo>
{major_code}
<imsx_severity>status</imsx_severity>
<imsx_description>Score for result_id is now 0.25</imsx_description>
<imsx_messageRefIdentifier>999999123</imsx_messageRefIdentifier>
<imsx_operationRefIdentifier>replaceResult</imsx_operationRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultResponse/>
</imsx_POXBody>
</imsx_POXEnvelopeResponse>
"""
result_id = 'result_id'
score = 0.25
@patch('uuid.uuid4', return_value='random_uuid')
def test_replace_result_message_uuid(self, _uuid_mock):
# Pylint doesn't recognize members in the LXML module
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
message_id = tree.xpath(
'//ns:imsx_messageIdentifier',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(message_id), 1)
self.assertEqual(message_id[0].text, 'random_uuid')
def test_replace_result_sourced_id(self):
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
sourced_id = tree.xpath(
'/ns:imsx_POXEnvelopeRequest/ns:imsx_POXBody/ns:replaceResultRequest/'
'ns:resultRecord/ns:sourcedGUID/ns:sourcedId',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(sourced_id), 1)
self.assertEqual(sourced_id[0].text, 'result_id')
def test_replace_result_score(self):
xml = outcomes.generate_replace_result_xml(self.result_id, self.score)
tree = etree.fromstring(xml)
xml_score = tree.xpath(
'/ns:imsx_POXEnvelopeRequest/ns:imsx_POXBody/ns:replaceResultRequest/'
'ns:resultRecord/ns:result/ns:resultScore/ns:textString',
namespaces={'ns': 'http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0'}
)
self.assertEqual(len(xml_score), 1)
self.assertEqual(xml_score[0].text, '0.25')
def create_response_object(
self, status, xml,
major_code='<imsx_codeMajor>success</imsx_codeMajor>'
):
"""
Returns an XML document containing a successful replace_result response.
"""
response = MagicMock()
response.status_code = status
response.content = xml.format(major_code=major_code).encode('ascii', 'ignore')
return response
def test_response_with_correct_xml(self):
xml = self.response_xml
response = self.create_response_object(200, xml)
self.assertTrue(outcomes.check_replace_result_response(response))
def test_response_with_bad_status_code(self):
response = self.create_response_object(500, '')
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_invalid_xml(self):
xml = '<badly>formatted</xml>'
response = self.create_response_object(200, xml)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_multiple_status_fields(self):
response = self.create_response_object(
200, self.response_xml,
major_code='<imsx_codeMajor>success</imsx_codeMajor>'
'<imsx_codeMajor>failure</imsx_codeMajor>'
)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_no_status_field(self):
response = self.create_response_object(
200, self.response_xml,
major_code=''
)
self.assertFalse(outcomes.check_replace_result_response(response))
def test_response_with_failing_status_field(self):
response = self.create_response_object(
200, self.response_xml,
major_code='<imsx_codeMajor>failure</imsx_codeMajor>'
)
self.assertFalse(outcomes.check_replace_result_response(response))
class TestBodyHashClient(unittest.TestCase):
"""
Test our custom BodyHashClient
This Client should do everything a normal oauthlib.oauth1.Client would do,
except it also adds oauth_body_hash to the Authorization headers.
"""
def test_simple_message(self):
oauth = requests_oauthlib.OAuth1(
'1000000000000000', # fake consumer key
'2000000000000000', # fake consumer secret
signature_method='HMAC-SHA1',
client_class=outcomes.BodyHashClient,
force_include_body=True
)
headers = {'content-type': 'application/xml'}
req = requests.Request(
'POST',
"http://example.edx.org/fake",
data="Hello world!",
auth=oauth,
headers=headers
)
prepped_req = req.prepare()
# Make sure that our body hash is now part of the test...
self.assertIn(
'oauth_body_hash="00hq6RNueFa8QiEjhep5cJRHWAI%3D"',
prepped_req.headers['Authorization']
)
# But make sure we haven't wiped out any of the other oauth values
# that we would expect to be in the Authorization header as well
expected_oauth_headers = [
"oauth_nonce", "oauth_timestamp", "oauth_version",
"oauth_signature_method", "oauth_consumer_key", "oauth_signature",
]
for oauth_header in expected_oauth_headers:
self.assertIn(oauth_header, prepped_req.headers['Authorization'])
class TestAssignmentsForProblem(ModuleStoreTestCase):
"""
Test cases for the assignments_for_problem method in outcomes.py
"""
def setUp(self):
super(TestAssignmentsForProblem, self).setUp()
self.user = UserFactory.create()
self.user_id = self.user.id
self.outcome_service = self.create_outcome_service('outcomes')
self.course = CourseFactory.create()
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.chapter = ItemFactory.create(parent=self.course, category="chapter")
self.vertical = ItemFactory.create(parent=self.chapter, category="vertical")
self.unit = ItemFactory.create(parent=self.vertical, category="unit")
def create_outcome_service(self, id_suffix):
"""
Create and save a new OutcomeService model in the test database. The
OutcomeService model requires an LtiConsumer model, so we create one of
those as well. The method takes an ID string that is used to ensure that
unique fields do not conflict.
"""
lti_consumer = LtiConsumer(
consumer_name='lti_consumer_name' + id_suffix,
consumer_key='lti_consumer_key' + id_suffix,
consumer_secret='lti_consumer_secret' + id_suffix,
instance_guid='lti_instance_guid' + id_suffix
)
lti_consumer.save()
outcome_service = OutcomeService(
lis_outcome_service_url='https://example.com/outcomes/' + id_suffix,
lti_consumer=lti_consumer
)
outcome_service.save()
return outcome_service
def create_graded_assignment(self, desc, result_id, outcome_service):
"""
Create and save a new GradedAssignment model in the test database.
"""
assignment = GradedAssignment(
user=self.user,
course_key=self.course.id,
usage_key=desc.location,
outcome_service=outcome_service,
lis_result_sourcedid=result_id,
version_number=0
)
assignment.save()
return assignment
def test_with_no_graded_assignments(self):
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_graded_unit(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
def test_with_graded_vertical(self):
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_vertical')
def test_with_graded_unit_and_vertical(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_vertical')
def test_with_unit_used_twice(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
def test_with_unit_graded_for_different_user(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
other_user = UserFactory.create()
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, other_user.id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_unit_graded_for_multiple_consumers(self):
other_outcome_service = self.create_outcome_service('second_consumer')
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', other_outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
self.assertEqual(assignments[0].outcome_service, self.outcome_service)
self.assertEqual(assignments[1].outcome_service, other_outcome_service)
| agpl-3.0 |
freakboy3742/django | tests/template_tests/filter_tests/test_wordwrap.py | 176 | 2032 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
@setup({
'wordwrap01': '{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'
})
def test_wordwrap01(self):
output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
@setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
def test_wordwrap02(self):
output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
def test_wrap(self):
self.assertEqual(
wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
def test_indent(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented',
)
def test_indent2(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 15),
'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented',
)
def test_non_string_input(self):
self.assertEqual(wordwrap(123, 2), '123')
def test_wrap_lazy_string(self):
self.assertEqual(
wordwrap(lazystr(
'this is a long paragraph of text that really needs to be wrapped I\'m afraid'
), 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
| bsd-3-clause |
40223133/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/select.py | 730 | 9440 | """
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
#(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
try:
mapped_exception = _exception_map[(exc.__class__, circumstance)]
mapped_exception.java_exception = exc
return mapped_exception
except KeyError:
return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
POLLIN = 1
POLLOUT = 2
# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
try:
channel = selectable_object.getchannel()
except:
try:
channel = selectable_object.fileno().getChannel()
except:
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
return channel
class poll:
def __init__(self):
self.selector = java.nio.channels.Selector.open()
self.chanmap = {}
self.unconnected_sockets = []
def _register_channel(self, socket_object, channel, mask):
jmask = 0
if mask & POLLIN:
# Note that OP_READ is NOT a valid event on server socket channels.
if channel.validOps() & OP_ACCEPT:
jmask = OP_ACCEPT
else:
jmask = OP_READ
if mask & POLLOUT:
if channel.validOps() & OP_WRITE:
jmask |= OP_WRITE
if channel.validOps() & OP_CONNECT:
jmask |= OP_CONNECT
selectionkey = channel.register(self.selector, jmask)
self.chanmap[channel] = (socket_object, selectionkey)
def _check_unconnected_sockets(self):
temp_list = []
for socket_object, mask in self.unconnected_sockets:
channel = _getselectable(socket_object)
if channel is not None:
self._register_channel(socket_object, channel, mask)
else:
temp_list.append( (socket_object, mask) )
self.unconnected_sockets = temp_list
def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
try:
channel = _getselectable(socket_object)
if channel is None:
# The socket is not yet connected, and thus has no channel
# Add it to a pending list, and return
self.unconnected_sockets.append( (socket_object, mask) )
return
self._register_channel(socket_object, channel, mask)
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def unregister(self, socket_object):
try:
channel = _getselectable(socket_object)
self.chanmap[channel][1].cancel()
del self.chanmap[channel]
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _dopoll(self, timeout):
if timeout is None or timeout < 0:
self.selector.select()
else:
try:
timeout = int(timeout)
if not timeout:
self.selector.selectNow()
else:
# No multiplication required: both cpython and java use millisecond timeouts
self.selector.select(timeout)
except ValueError as vx:
raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
# The returned selectedKeys cannot be used from multiple threads!
return self.selector.selectedKeys()
def poll(self, timeout=None):
try:
self._check_unconnected_sockets()
selectedkeys = self._dopoll(timeout)
results = []
for k in selectedkeys.iterator():
jmask = k.readyOps()
pymask = 0
if jmask & OP_READ: pymask |= POLLIN
if jmask & OP_WRITE: pymask |= POLLOUT
if jmask & OP_ACCEPT: pymask |= POLLIN
if jmask & OP_CONNECT: pymask |= POLLOUT
# Now return the original userobject, and the return event mask
results.append( (self.chanmap[k.channel()][0], pymask) )
return results
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _deregister_all(self):
try:
for k in self.selector.keys():
k.cancel()
# Keys are not actually removed from the selector until the next select operation.
self.selector.selectNow()
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
self._deregister_all()
self.selector.close()
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except Exception as x:
raise TypeError("Select timeout value must be a number or None")
if value < 0:
raise error("Select timeout value cannot be negative", errno.EINVAL)
if floatvalue < 0.000001:
return 0
return int(floatvalue * 1000) # Convert to milliseconds
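# Illustrative conversions (not part of the original module):
#   _calcselecttimeoutvalue(None) -> None   (block until something is ready)
#   _calcselecttimeoutvalue(0)    -> 0      (poll without blocking)
#   _calcselecttimeoutvalue(2.5)  -> 2500   (seconds converted to milliseconds)
#   _calcselecttimeoutvalue(-1)   -> raises error(..., errno.EINVAL)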
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
def __init__(self):
self.is_windows = os.name == 'nt'
if self.is_windows:
            self.poll_object_queue = queue.Queue()
import atexit
atexit.register(self.finalize)
def get_poll_object(self):
if not self.is_windows:
return poll()
try:
return self.poll_object_queue.get(False)
        except queue.Empty:
return poll()
def release_poll_object(self, pobj):
if self.is_windows:
pobj._deregister_all()
self.poll_object_queue.put(pobj)
else:
pobj.close()
def finalize(self):
if self.is_windows:
while True:
try:
p = self.poll_object_queue.get(False)
p.close()
                except queue.Empty:
return
_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
timeout = _calcselecttimeoutvalue(timeout)
# First create a poll object to do the actual watching.
pobj = _poll_object_cache.get_poll_object()
try:
registered_for_read = {}
# Check the read list
for fd in read_fd_list:
pobj.register(fd, POLLIN)
registered_for_read[fd] = 1
# And now the write list
for fd in write_fd_list:
if fd in registered_for_read:
# registering a second time overwrites the first
pobj.register(fd, POLLIN|POLLOUT)
else:
pobj.register(fd, POLLOUT)
results = pobj.poll(timeout)
# Now start preparing the results
read_ready_list, write_ready_list, oob_ready_list = [], [], []
for fd, mask in results:
if mask & POLLIN:
read_ready_list.append(fd)
if mask & POLLOUT:
write_ready_list.append(fd)
return read_ready_list, write_ready_list, oob_ready_list
finally:
_poll_object_cache.release_poll_object(pobj)
select = native_select
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
# First turn all sockets to non-blocking
# keeping track of which ones have changed
modified_channels = []
try:
for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
for s in socket_list:
channel = _getselectable(s)
if channel.isBlocking():
modified_channels.append(channel)
channel.configureBlocking(0)
return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
finally:
for channel in modified_channels:
channel.configureBlocking(1)
| gpl-3.0 |
lmmsoft/LeetCode | LeetCode-Algorithm/0039. Combination Sum/0039.py | 1 | 1769 | from typing import List
class Solution1:
    def combinationSum(self, nums: List[int], target: int) -> List[List[int]]:
dp2: dict = {0: [[]]}
for n in nums:
for i in range(n, target + 1):
if (i - n) in dp2:
li: list = dp2[i - n]
li2 = [l + [n] for l in li]
if i in dp2:
dp2[i] = dp2[i] + li2
else:
dp2[i] = li2
if target in dp2:
return dp2[target]
return []
class Solution:
    def combinationSum(self, nums: List[int], target: int) -> List[List[int]]:
        # Store the combinations in a three-dimensional structure:
        # dim 1: dp is indexed by the sum, one entry per possible total
        # dim 2: dp[s] is the list of combinations whose elements sum to s
        # dim 3: dp[s][j] is the j-th combination for sum s, itself a list of numbers
        dp: list = [[[]]] + [[] for i in range(
            target)]  # init, e.g. [[[]], [], [], [], [], []]; sum 0 has exactly one combination, the empty list (when an unbounded knapsack must be filled exactly, dp[0] has to be initialized this way)
        for n in nums:  # for each candidate number
            for s in range(n, target + 1):  # unbounded knapsack: iterate sums from small to large
                li = [l + [n] for l in dp[s - n]]  # appending n to every combination that sums to s-n yields a new combination that sums to s
                dp[s] += li  # add the new combinations to those already found for sum s
return dp[target]
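# Illustrative trace (not part of the original solution): for nums=[2, 3] and
# target=5 the dp table evolves as
#   start:      dp = [[[]], [], [], [], [], []]
#   after n=2:  dp[2] = [[2]], dp[4] = [[2, 2]]
#   after n=3:  dp[3] = [[3]], dp[5] = [[2, 3]]
# so combinationSum([2, 3], 5) returns [[2, 3]].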
if __name__ == '__main__':
assert Solution().combinationSum([2, 3, 6, 7], 7) == [[2, 2, 3], [7]]
assert Solution().combinationSum([2, 3, 5], 8) == [
[2, 2, 2, 2],
[2, 3, 3],
[3, 5]
]
assert Solution().combinationSum([2], 1) == []
| gpl-2.0 |
endlessm/chromium-browser | native_client/site_scons/site_tools/naclsdk.py | 1 | 28469 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NaCl SDK tool SCons."""
from __future__ import print_function
import __builtin__
import re
import os
import shutil
import sys
import SCons.Scanner
import SCons.Script
import subprocess
import tempfile
NACL_TOOL_MAP = {
'arm': {
'32': {
'tooldir': 'arm-nacl',
'as_flag': '',
'cc_flag': '',
'ld_flag': '',
},
},
'x86': {
'32': {
'tooldir': 'i686-nacl',
'other_libdir': 'lib32',
'as_flag': '--32',
'cc_flag': '-m32',
'ld_flag': ' -melf_i386_nacl',
},
'64': {
'tooldir': 'x86_64-nacl',
'other_libdir': 'lib64',
'as_flag': '--64',
'cc_flag': '-m64',
'ld_flag': ' -melf_x86_64_nacl',
},
},
'mips': {
'32': {
'tooldir': 'mipsel-nacl',
'as_flag': '',
'cc_flag': '',
'ld_flag': '',
},
},
}
def _StubOutEnvToolsForBuiltElsewhere(env):
"""Stub out all tools so that they point to 'true'.
Some machines have their code built by another machine, they'll therefore
run 'true' instead of running the usual build tools.
Args:
env: The SCons environment in question.
"""
assert(env.Bit('built_elsewhere'))
env.Replace(CC='true', CXX='true', LINK='true', AR='true',
RANLIB='true', AS='true', ASPP='true', LD='true',
STRIP='true', OBJDUMP='true', OBJCOPY='true',
PNACLOPT='true', PNACLFINALIZE='true')
def _SetEnvForNativeSdk(env, sdk_path):
"""Initialize environment according to target architecture."""
bin_path = os.path.join(sdk_path, 'bin')
# NOTE: attempts to eliminate this PATH setting and use
# absolute path have been futile
env.PrependENVPath('PATH', bin_path)
tool_prefix = None
tool_map = NACL_TOOL_MAP[env['TARGET_ARCHITECTURE']]
subarch_spec = tool_map[env['TARGET_SUBARCH']]
tooldir = subarch_spec['tooldir']
# We need to pass it extra options for the subarch we are building.
as_mode_flag = subarch_spec['as_flag']
cc_mode_flag = subarch_spec['cc_flag']
ld_mode_flag = subarch_spec['ld_flag']
if os.path.exists(os.path.join(sdk_path, tooldir)):
# The tooldir for the build target exists.
# The tools there do the right thing without special options.
tool_prefix = tooldir
libdir = os.path.join(tooldir, 'lib')
else:
# We're building for a target for which there is no matching tooldir.
# For example, for x86-32 when only <sdk_path>/x86_64-nacl/ exists.
# Find a tooldir for a different subarch that does exist.
others_map = tool_map.copy()
del others_map[env['TARGET_SUBARCH']]
for subarch, tool_spec in others_map.iteritems():
tooldir = tool_spec['tooldir']
if os.path.exists(os.path.join(sdk_path, tooldir)):
# OK, this is the other subarch to use as tooldir.
tool_prefix = tooldir
# The lib directory may have an alternate name, i.e.
# 'lib32' in the x86_64-nacl tooldir.
libdir = os.path.join(tooldir, subarch_spec.get('other_libdir', 'lib'))
break
if tool_prefix is None:
raise Exception("Cannot find a toolchain for %s in %s" %
(env['TARGET_FULLARCH'], sdk_path))
cc = 'clang' if env.Bit('nacl_clang') else 'gcc'
cxx = 'clang++' if env.Bit('nacl_clang') else 'g++'
env.Replace(# Replace header and lib paths.
# where to put nacl extra sdk headers
# TODO(robertm): switch to using the mechanism that
# passes arguments to scons
NACL_SDK_INCLUDE='%s/%s/include' % (sdk_path, tool_prefix),
# where to find/put nacl generic extra sdk libraries
NACL_SDK_LIB='%s/%s' % (sdk_path, libdir),
# Replace the normal unix tools with the NaCl ones.
CC=os.path.join(bin_path, '%s-%s' % (tool_prefix, cc)),
CXX=os.path.join(bin_path, '%s-%s' % (tool_prefix, cxx)),
AR=os.path.join(bin_path, '%s-ar' % tool_prefix),
AS=os.path.join(bin_path, '%s-as' % tool_prefix),
ASPP=os.path.join(bin_path, '%s-%s' % (tool_prefix, cc)),
FILECHECK=os.path.join(bin_path, 'FileCheck'),
GDB=os.path.join(bin_path, '%s-gdb' % tool_prefix),
# NOTE: use g++ for linking so we can handle C AND C++.
LINK=os.path.join(bin_path, '%s-%s' % (tool_prefix, cxx)),
# Grrr... and sometimes we really need ld.
LD=os.path.join(bin_path, '%s-ld' % tool_prefix) + ld_mode_flag,
RANLIB=os.path.join(bin_path, '%s-ranlib' % tool_prefix),
NM=os.path.join(bin_path, '%s-nm' % tool_prefix),
OBJDUMP=os.path.join(bin_path, '%s-objdump' % tool_prefix),
OBJCOPY=os.path.join(bin_path, '%s-objcopy' % tool_prefix),
STRIP=os.path.join(bin_path, '%s-strip' % tool_prefix),
ADDR2LINE=os.path.join(bin_path, '%s-addr2line' % tool_prefix),
BASE_LINKFLAGS=[cc_mode_flag],
BASE_CFLAGS=[cc_mode_flag],
BASE_CXXFLAGS=[cc_mode_flag],
BASE_ASFLAGS=[as_mode_flag],
BASE_ASPPFLAGS=[cc_mode_flag],
CFLAGS=['-std=gnu99'],
CCFLAGS=['-O3',
'-Werror',
'-Wall',
'-Wno-variadic-macros',
'-Wswitch-enum',
'-g',
'-fno-stack-protector',
'-fdiagnostics-show-option',
'-pedantic',
'-D__linux__',
],
ASFLAGS=[],
)
# NaClSdk environment seems to be inherited from the host environment.
# On Linux host, this probably makes sense. On Windows and Mac, this
# introduces nothing except problems.
# For now, simply override the environment settings as in
# <scons>/engine/SCons/Platform/posix.py
env.Replace(LIBPREFIX='lib',
LIBSUFFIX='.a',
SHLIBPREFIX='$LIBPREFIX',
SHLIBSUFFIX='.so',
LIBPREFIXES=['$LIBPREFIX'],
LIBSUFFIXES=['$LIBSUFFIX', '$SHLIBSUFFIX'],
)
# Force -fPIC when compiling for shared libraries.
env.AppendUnique(SHCCFLAGS=['-fPIC'],
)
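# A rough sketch of what this produces for an x86-64 build whose tooldir exists,
# assuming sdk_path='/sdk' and the gcc (non-clang) toolchain:
#   CC               -> /sdk/bin/x86_64-nacl-gcc
#   LD               -> /sdk/bin/x86_64-nacl-ld -melf_x86_64_nacl
#   BASE_CFLAGS      -> ['-m64']
#   NACL_SDK_INCLUDE -> /sdk/x86_64-nacl/include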
def _SetEnvForPnacl(env, root):
# All the PNaCl tools require Python to be in the PATH.
arch = env['TARGET_FULLARCH']
assert arch in ['arm', 'mips32', 'x86-32', 'x86-64']
if env.Bit('pnacl_unsandboxed'):
if env.Bit('host_linux'):
arch = '%s-linux' % arch
elif env.Bit('host_mac'):
arch = '%s-mac' % arch
if env.Bit('nonsfi_nacl'):
arch += '-nonsfi'
arch_flag = ' -arch %s' % arch
ld_arch_flag = '' if env.Bit('pnacl_generate_pexe') else arch_flag
translator_root = os.path.join(os.path.dirname(root), 'pnacl_translator')
binroot = os.path.join(root, 'bin')
if env.Bit('pnacl_native_clang_driver'):
binprefix = os.path.join(binroot, 'le32-nacl-')
else:
binprefix = os.path.join(binroot, 'pnacl-')
binext = ''
if env.Bit('host_windows'):
binext = '.bat'
pnacl_ar = binprefix + 'ar' + binext
pnacl_as = binprefix + 'as' + binext
pnacl_nm = binprefix + 'nm' + binext
pnacl_ranlib = binprefix + 'ranlib' + binext
# Use the standalone sandboxed translator in sbtc mode
if env.Bit('use_sandboxed_translator'):
pnacl_translate = os.path.join(translator_root, 'bin',
'pnacl-translate' + binext)
else:
pnacl_translate = binprefix + 'translate' + binext
pnacl_cc = binprefix + 'clang' + binext
pnacl_cxx = binprefix + 'clang++' + binext
pnacl_ld = binprefix + 'ld' + binext
pnacl_disass = binprefix + 'dis' + binext
pnacl_filecheck = os.path.join(binroot, 'FileCheck')
pnacl_finalize = binprefix + 'finalize' + binext
pnacl_opt = binprefix + 'opt' + binext
pnacl_strip = binprefix + 'strip' + binext
# NOTE: XXX_flags start with space for easy concatenation
# The flags generated here get baked into the commands (CC, CXX, LINK)
# instead of CFLAGS etc to keep them from getting blown away by some
# tests. Don't add flags here unless they always need to be preserved.
pnacl_cxx_flags = ''
pnacl_cc_flags = ' -std=gnu99'
pnacl_ld_flags = ' ' + ' '.join(env['PNACL_BCLDFLAGS'])
pnacl_translate_flags = ''
sdk_base = os.path.join(root, 'le32-nacl')
bias_flags = ''
# The supported use cases for nonpexe mode (IRT building, nonsfi) use biased
# bitcode and native calling conventions, so inject the --target= flags to
# get that by default. Put the flags in BASE_{C,CXX,LINK}FLAGS rather than in
# the commands directly, so that the test can override them. In addition to
# using the flags, we have to point NACL_SDK_{LIB,INCLUDE} to the toolchain
# directories containing the biased bitcode libraries.
if not env.Bit('pnacl_generate_pexe') and env['TARGET_FULLARCH'] != 'mips32':
bias_flags = ' '.join(env.BiasedBitcodeFlags())
archdir = {'x86-32': 'i686', 'x86-64': 'x86_64', 'arm': 'arm'}
sdk_base = os.path.join(root, archdir[env['TARGET_FULLARCH']] + '_bc-nacl')
if env.Bit('nacl_pic'):
pnacl_cc_flags += ' -fPIC'
pnacl_cxx_flags += ' -fPIC'
# NOTE: this is a special hack for the pnacl backend which
# does more than linking
pnacl_ld_flags += ' -fPIC'
pnacl_translate_flags += ' -fPIC'
if env.Bit('use_sandboxed_translator'):
sb_flags = ' --pnacl-sb'
pnacl_ld_flags += sb_flags
pnacl_translate_flags += sb_flags
if env.Bit('x86_64_zero_based_sandbox'):
pnacl_translate_flags += ' -sfi-zero-based-sandbox'
env.Replace(# Replace header and lib paths.
NACL_SDK_INCLUDE=os.path.join(root, sdk_base, 'include'),
NACL_SDK_LIB=os.path.join(root, sdk_base, 'lib'),
# Remove arch-specific flags (if any)
BASE_LINKFLAGS=bias_flags,
BASE_CFLAGS=bias_flags,
BASE_CXXFLAGS=bias_flags,
BASE_ASFLAGS='',
BASE_ASPPFLAGS='',
# Replace the normal unix tools with the PNaCl ones.
CC=pnacl_cc + pnacl_cc_flags,
CXX=pnacl_cxx + pnacl_cxx_flags,
ASPP=pnacl_cc + pnacl_cc_flags,
LIBPREFIX="lib",
SHLIBPREFIX="lib",
SHLIBSUFFIX=".so",
OBJSUFFIX=".bc",
LINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
# Although we are currently forced to produce native output
# for LINK, we are free to produce bitcode for SHLINK
# (SharedLibrary linking) because scons doesn't do anything
# with shared libraries except use them with the toolchain.
SHLINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
LD=pnacl_ld,
AR=pnacl_ar,
AS=pnacl_as + ld_arch_flag,
RANLIB=pnacl_ranlib,
FILECHECK=pnacl_filecheck,
DISASS=pnacl_disass,
OBJDUMP=pnacl_disass,
STRIP=pnacl_strip,
TRANSLATE=pnacl_translate + arch_flag + pnacl_translate_flags,
PNACLFINALIZE=pnacl_finalize,
PNACLOPT=pnacl_opt,
)
if env.Bit('built_elsewhere'):
def FakeInstall(dest, source, env):
print('Not installing', dest)
_StubOutEnvToolsForBuiltElsewhere(env)
env.Replace(INSTALL=FakeInstall)
if env.Bit('translate_in_build_step'):
env.Replace(TRANSLATE='true')
env.Replace(PNACLFINALIZE='true')
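# A rough sketch of the commands this yields, assuming root='/pnacl', a Linux host,
# the default pnacl- driver, no sandboxed translator, no PIC, and pexe generation
# disabled for an x86-64 target:
#   CC        -> /pnacl/bin/pnacl-clang -std=gnu99
#   LINK      -> /pnacl/bin/pnacl-clang++ -arch x86-64 <PNACL_BCLDFLAGS>
#   TRANSLATE -> /pnacl/bin/pnacl-translate -arch x86-64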
def PNaClForceNative(env):
assert(env.Bit('bitcode'))
if env.Bit('pnacl_generate_pexe'):
env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
return
env.Replace(OBJSUFFIX='.o',
SHLIBSUFFIX='.so')
arch_flag = ' -arch ${TARGET_FULLARCH}'
if env.Bit('nonsfi_nacl'):
arch_flag += '-nonsfi'
cc_flags = ' --pnacl-allow-native --pnacl-allow-translate'
env.Append(CC=arch_flag + cc_flags,
CXX=arch_flag + cc_flags,
ASPP=arch_flag + cc_flags,
LINK=cc_flags) # Already has -arch
env['LD'] = 'NO-NATIVE-LD-INVOCATION-ALLOWED'
env['SHLINK'] = '${LINK}'
if env.Bit('built_elsewhere'):
_StubOutEnvToolsForBuiltElsewhere(env)
# Get an environment for nacl-clang when in PNaCl mode.
def PNaClGetNNaClEnv(env):
assert(env.Bit('bitcode'))
assert(not env.Bit('build_mips32'))
# This is kind of a hack. We clone the environment,
# clear the bitcode bit, and then reload naclsdk.py
native_env = env.Clone()
native_env.ClearBits('bitcode')
native_env.SetBits('nacl_clang')
if env.Bit('built_elsewhere'):
_StubOutEnvToolsForBuiltElsewhere(env)
else:
native_env = native_env.Clone(tools=['naclsdk'])
if native_env.Bit('pnacl_generate_pexe'):
native_env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
else:
# These are unfortunately clobbered by running Tool.
native_env.Replace(EXTRA_CFLAGS=env['EXTRA_CFLAGS'],
EXTRA_CXXFLAGS=env['EXTRA_CXXFLAGS'],
CCFLAGS=env['CCFLAGS'],
CFLAGS=env['CFLAGS'],
CXXFLAGS=env['CXXFLAGS'])
return native_env
# This adds architecture specific defines for the target architecture.
# These are normally omitted by PNaCl.
# For example: __i686__, __arm__, __mips__, __x86_64__
def AddBiasForPNaCl(env, temporarily_allow=True):
assert(env.Bit('bitcode'))
# re: the temporarily_allow flag -- that is for:
# BUG= http://code.google.com/p/nativeclient/issues/detail?id=1248
if env.Bit('pnacl_generate_pexe') and not temporarily_allow:
env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
return
if env.Bit('build_arm'):
bias_flag = '--pnacl-bias=arm'
elif env.Bit('build_x86_32'):
bias_flag = '--pnacl-bias=x86-32'
elif env.Bit('build_x86_64'):
bias_flag = '--pnacl-bias=x86-64'
elif env.Bit('build_mips32'):
bias_flag = '--pnacl-bias=mips32'
else:
raise Exception("Unknown architecture!")
if env.Bit('nonsfi_nacl'):
bias_flag += '-nonsfi'
env.AppendUnique(CCFLAGS=[bias_flag],
ASPPFLAGS=[bias_flag])
def ValidateSdk(env):
checkables = ['${NACL_SDK_INCLUDE}/stdio.h']
for c in checkables:
if os.path.exists(env.subst(c)):
continue
# Windows build does not use cygwin and so can not see nacl subdirectory
# if it's cygwin's symlink - check for /include instead...
if os.path.exists(re.sub(r'(nacl64|nacl)/include/([^/]*)$',
r'include/\2',
env.subst(c))):
continue
# TODO(pasko): remove the legacy header presence test below.
if os.path.exists(re.sub(r'nacl/include/([^/]*)$',
r'nacl64/include/\1',
env.subst(c))):
continue
message = env.subst('''
ERROR: NativeClient toolchain does not seem to be present!
Missing: %s
Configuration is:
NACL_SDK_INCLUDE=${NACL_SDK_INCLUDE}
NACL_SDK_LIB=${NACL_SDK_LIB}
CC=${CC}
CXX=${CXX}
AR=${AR}
AS=${AS}
ASPP=${ASPP}
LINK=${LINK}
RANLIB=${RANLIB}
Run: gclient runhooks --force or build the SDK yourself.
''' % c)
sys.stderr.write(message + "\n\n")
sys.exit(-1)
def ScanLinkerScript(node, env, libpath):
"""SCons scanner for linker script files.
This handles trivial linker scripts like those used for libc.so and libppapi.a.
These scripts just indicate more input files to be linked in, so we want
to produce dependencies on them.
A typical such linker script looks like:
/* Some comments. */
INPUT ( foo.a libbar.a libbaz.a )
or:
/* GNU ld script
Use the shared library, but some functions are only in
the static library, so try that secondarily. */
OUTPUT_FORMAT(elf64-x86-64)
GROUP ( /lib/libc.so.6 /usr/lib/libc_nonshared.a
AS_NEEDED ( /lib/ld-linux-x86-64.so.2 ) )
"""
contents = node.get_text_contents()
if contents.startswith('!<arch>\n') or contents.startswith('\177ELF'):
# An archive or ELF file is not a linker script.
return []
comment_pattern = re.compile(r'/\*.*?\*/', re.DOTALL | re.MULTILINE)
def remove_comments(text):
return re.sub(comment_pattern, '', text)
tokens = remove_comments(contents).split()
libs = []
while tokens:
token = tokens.pop()
if token.startswith('OUTPUT_FORMAT('):
pass
elif token == 'OUTPUT_FORMAT':
# Swallow the next three tokens: '(', 'xyz', ')'
del tokens[0:2]
elif token in ['(', ')', 'INPUT', 'GROUP', 'AS_NEEDED']:
pass
else:
libs.append(token)
# Find those items in the library path, ignoring ones we fail to find.
found = [SCons.Node.FS.find_file(lib, libpath) for lib in libs]
return [lib for lib in found if lib is not None]
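# A small example of what the scanner yields: for a script containing only
#   INPUT ( foo.a libbar.a )
# the INPUT and parenthesis tokens are skipped, and the returned list holds the
# SCons nodes for foo.a and libbar.a, provided both are found on LIBPATH.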
# This is a modified copy of the class TempFileMunge in
# third_party/scons-2.0.1/engine/SCons/Platform/__init__.py.
# It differs in using quote_for_at_file (below) in place of
# SCons.Subst.quote_spaces.
class NaClTempFileMunge(object):
"""A callable class. You can set an Environment variable to this,
then call it with a string argument, and it will perform temporary
file substitution on it. This is used to circumvent the long command
line limitation.
Example usage:
env["TEMPFILE"] = TempFileMunge
env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"
By default, the name of the temporary file used begins with a
prefix of '@'. This may be configured for other tool chains by
setting '$TEMPFILEPREFIX'.
env["TEMPFILEPREFIX"] = '-@' # diab compiler
env["TEMPFILEPREFIX"] = '-via' # arm tool chain
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature):
if for_signature:
# If we're being called for signature calculation, it's
# because we're being called by the string expansion in
# Subst.py, which has the logic to strip any $( $) that
# may be in the command line we squirreled away. So we
# just return the raw command line and let the upper
# string substitution layers do their thing.
return self.cmd
# Now we're actually being called because someone is actually
# going to try to execute the command, so we have to do our
# own expansion.
cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
try:
maxline = int(env.subst('$MAXLINELENGTH'))
except ValueError:
maxline = 2048
length = 0
for c in cmd:
length += len(c)
if length <= maxline:
return self.cmd
# We do a normpath because mktemp() has what appears to be
# a bug in Windows that will use a forward slash as a path
# delimiter. Windows's link mistakes that for a command line
# switch and barfs.
#
# We use the .lnk suffix for the benefit of the Phar Lap
# linkloc linker, which likes to append an .lnk suffix if
# none is given.
(fd, tmp) = tempfile.mkstemp('.lnk', text=True)
native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
if env['SHELL'] and env['SHELL'] == 'sh':
# The sh shell will try to escape the backslashes in the
# path, so unescape them.
native_tmp = native_tmp.replace('\\', r'\\\\')
# In Cygwin, we want to use rm to delete the temporary
# file, because del does not exist in the sh shell.
rm = env.Detect('rm') or 'del'
else:
# Don't use 'rm' if the shell is not sh, because rm won't
# work with the Windows shells (cmd.exe or command.com) or
# Windows path names.
rm = 'del'
prefix = env.subst('$TEMPFILEPREFIX')
if not prefix:
prefix = '@'
# The @file is sometimes handled by a GNU tool itself, using
# the libiberty/argv.c code, and sometimes handled implicitly
# by Cygwin before the tool's own main even sees it. These
# two treat the contents differently, so there is no single
# perfect way to quote. The libiberty @file code uses a very
# regular scheme: a \ in any context is always swallowed and
# quotes the next character, whatever it is; '...' or "..."
# quote whitespace in ... and the outer quotes are swallowed.
# The Cygwin @file code uses a vaguely similar scheme, but its
# treatment of \ is much less consistent: a \ outside a quoted
# string is never stripped, and a \ inside a quoted string is
# only stripped when it quoted something (Cygwin's definition
# of "something" here is nontrivial). In our uses the only
# appearances of \ we expect are in Windows-style file names.
# Fortunately, an extra doubling of \\ that doesn't get
# stripped is harmless in the middle of a file name.
def quote_for_at_file(s):
s = str(s)
if ' ' in s or '\t' in s:
return '"' + re.sub('([ \t"])', r'\\\1', s) + '"'
return s.replace('\\', '\\\\')
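# A few illustrative values showing both branches (inputs assumed):
#   quote_for_at_file('libfoo.a')        -> 'libfoo.a'
#   quote_for_at_file('C:\\nacl\\lib.a') -> 'C:\\\\nacl\\\\lib.a'  (backslashes doubled)
#   quote_for_at_file('my lib.a')        -> '"my\\ lib.a"'         (space escaped, quoted)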
args = list(map(quote_for_at_file, cmd[1:]))
os.write(fd, " ".join(args) + "\n")
os.close(fd)
# XXX Using the SCons.Action.print_actions value directly
# like this is bogus, but expedient. This class should
# really be rewritten as an Action that defines the
# __call__() and strfunction() methods and lets the
# normal action-execution logic handle whether or not to
# print/execute the action. The problem, though, is all
# of that is decided before we execute this method as
# part of expanding the $TEMPFILE construction variable.
# Consequently, refactoring this will have to wait until
# we get more flexible with allowing Actions to exist
# independently and get strung together arbitrarily like
# Ant tasks. In the meantime, it's going to be more
# user-friendly to not let obsession with architectural
# purity get in the way of just being helpful, so we'll
# reach into SCons.Action directly.
if SCons.Action.print_actions:
print("Using tempfile " + native_tmp + " for command line:\n" +
str(cmd[0]) + " " + " ".join(args))
return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def generate(env):
"""SCons entry point for this tool.
Args:
env: The SCons environment in question.
NOTE: SCons requires the use of this name, which fails lint.
"""
# make these methods available to the top level scons file
env.AddMethod(ValidateSdk)
env.AddMethod(AddBiasForPNaCl)
env.AddMethod(PNaClForceNative)
env.AddMethod(PNaClGetNNaClEnv)
# Invoke the various unix tools that the NativeClient SDK resembles.
env.Tool('g++')
env.Tool('gcc')
env.Tool('gnulink')
env.Tool('ar')
env.Tool('as')
if env.Bit('pnacl_generate_pexe'):
suffix = '.nonfinal.pexe'
else:
suffix = '.nexe'
env.Replace(
COMPONENT_LINKFLAGS=[''],
COMPONENT_LIBRARY_LINK_SUFFIXES=['.pso', '.so', '.a'],
_RPATH='',
COMPONENT_LIBRARY_DEBUG_SUFFIXES=[],
PROGSUFFIX=suffix,
# adding BASE_ AND EXTRA_ flags to common command lines
# The suggested usage pattern is:
# BASE_XXXFLAGS can only be set in this file
# EXTRA_XXXFLAGS can only be set in a ComponentXXX call
# NOTE: we also have EXTRA_LIBS which is handled separately in
# site_scons/site_tools/component_builders.py
# NOTE: the command lines were gleaned from:
# * ../third_party/scons-2.0.1/engine/SCons/Tool/cc.py
# * ../third_party/scons-2.0.1/engine/SCons/Tool/c++.py
# * etc.
CCCOM='$CC $BASE_CFLAGS $CFLAGS $EXTRA_CFLAGS ' +
'$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
SHCCCOM='$SHCC $BASE_CFLAGS $SHCFLAGS $EXTRA_CFLAGS ' +
'$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
CXXCOM='$CXX $BASE_CXXFLAGS $CXXFLAGS $EXTRA_CXXFLAGS ' +
'$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
SHCXXCOM='$SHCXX $BASE_CXXFLAGS $SHCXXFLAGS $EXTRA_CXXFLAGS ' +
'$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
LINKCOM='$LINK $BASE_LINKFLAGS $LINKFLAGS $EXTRA_LINKFLAGS ' +
'$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
SHLINKCOM='$SHLINK $BASE_LINKFLAGS $SHLINKFLAGS $EXTRA_LINKFLAGS ' +
'$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
ASCOM='$AS $BASE_ASFLAGS $ASFLAGS $EXTRA_ASFLAGS -o $TARGET $SOURCES',
ASPPCOM='$ASPP $BASE_ASPPFLAGS $ASPPFLAGS $EXTRA_ASPPFLAGS ' +
'$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES',
# Strip doesn't seem to be a first-class citizen in SCons country,
# so we have to add these *COM, *COMSTR manually.
# Note: it appears we cannot add this in component_setup.py
STRIPFLAGS=['--strip-all'],
STRIPCOM='${STRIP} ${STRIPFLAGS}',
TRANSLATECOM='${TRANSLATE} ${TRANSLATEFLAGS} ${SOURCES} -o ${TARGET}',
PNACLFINALIZEFLAGS=[],
PNACLFINALIZECOM='${PNACLFINALIZE} ${PNACLFINALIZEFLAGS} ' +
'${SOURCES} -o ${TARGET}',
)
# Windows has a small limit on the command line size. The linking and AR
# commands can get quite large. So bring in the SCons machinery to put
# most of a command line into a temporary file and pass it with
# @filename, which works with gcc.
if env['PLATFORM'] in ['win32', 'cygwin']:
env['TEMPFILE'] = NaClTempFileMunge
for com in ['LINKCOM', 'SHLINKCOM', 'ARCOM']:
env[com] = "${TEMPFILE('%s')}" % env[com]
# Get root of the SDK.
root = env.GetToolchainDir()
# if bitcode=1 use pnacl toolchain
if env.Bit('bitcode'):
_SetEnvForPnacl(env, root)
elif env.Bit('built_elsewhere'):
def FakeInstall(dest, source, env):
print('Not installing', dest)
_StubOutEnvToolsForBuiltElsewhere(env)
env.Replace(INSTALL=FakeInstall)
else:
_SetEnvForNativeSdk(env, root)
if (env.Bit('bitcode') or env.Bit('nacl_clang')) and env.Bit('build_x86'):
# Get GDB from the nacl-gcc glibc toolchain even when using PNaCl.
# TODO(mseaborn): We really want the nacl-gdb binary to be in a
# separate tarball from the nacl-gcc toolchain, then this step
# will not be necessary.
# See http://code.google.com/p/nativeclient/issues/detail?id=2773
temp_env = env.Clone()
temp_env.ClearBits('bitcode', 'nacl_clang')
temp_env.SetBits('nacl_glibc')
temp_root = temp_env.GetToolchainDir()
_SetEnvForNativeSdk(temp_env, temp_root)
env.Replace(GDB=temp_env['GDB'])
env.Prepend(LIBPATH='${NACL_SDK_LIB}')
# Install our scanner for (potential) linker scripts.
# It applies to "source" files ending in .a or .so.
# Dependency files it produces are to be found in ${LIBPATH}.
# It is applied recursively to those dependencies in case
# some of them are linker scripts too.
ldscript_scanner = SCons.Scanner.Base(
function=ScanLinkerScript,
skeys=['.a', '.so', '.pso'],
path_function=SCons.Scanner.FindPathDirs('LIBPATH'),
recursive=True
)
env.Append(SCANNERS=ldscript_scanner)
# Scons tests can check this version number to decide whether to
# enable tests for toolchain bug fixes or new features. See
# description in pnacl/build.sh.
if 'toolchain_feature_version' in SCons.Script.ARGUMENTS:
version = int(SCons.Script.ARGUMENTS['toolchain_feature_version'])
else:
version_file = os.path.join(root, 'FEATURE_VERSION')
# There is no pnacl_newlib toolchain on ARM, only a pnacl_translator, so
# use that if necessary. Otherwise use it if we are doing sandboxed
# translation.
if not os.path.exists(version_file) or env.Bit('use_sandboxed_translator'):
version_file = os.path.join(os.path.dirname(root), 'pnacl_translator',
'FEATURE_VERSION')
if os.path.exists(version_file):
with open(version_file, 'r') as fh:
version = int(fh.read())
else:
version = 0
env.Replace(TOOLCHAIN_FEATURE_VERSION=version)
| bsd-3-clause |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/vars/manager.py | 1 | 29075 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from collections import defaultdict, MutableMapping
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
from ansible.inventory.host import Host
from ansible.inventory.helpers import sort_groups, get_group_vars
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems, string_types, text_type
from ansible.plugins.loader import lookup_loader, vars_loader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars
from ansible.utils.unsafe_proxy import wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def preprocess_vars(a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [a]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
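# For instance (values assumed for illustration):
#   preprocess_vars({'a': 1})             -> [{'a': 1}]
#   preprocess_vars([{'a': 1}, {'b': 2}]) -> [{'a': 1}, {'b': 2}]
#   preprocess_vars(None)                 -> None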
def strip_internal_keys(dirty):
'''
All keys starting with _ansible_ are internal, so create a copy of the 'dirty' dict
and remove them from the clean one before returning it
'''
clean = dirty.copy()
for k in dirty.keys():
if isinstance(k, string_types) and k.startswith('_ansible_'):
del clean[k]
elif isinstance(dirty[k], dict):
clean[k] = strip_internal_keys(dirty[k])
return clean
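# For instance (keys assumed for illustration):
#   strip_internal_keys({'_ansible_no_log': False, 'rc': 0, 'nested': {'_ansible_x': 1}})
#   -> {'rc': 0, 'nested': {}}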
def remove_internal_keys(data):
'''
More nuanced version of strip_internal_keys
'''
for key in list(data.keys()):
if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
del data[key]
# remove bad/empty internal keys
for key in ['warnings', 'deprecations']:
if key in data and not data[key]:
del data[key]
class VariableManager:
_ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
def __init__(self, loader=None, inventory=None):
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = inventory
self._loader = loader
self._hostvars = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self._options_vars = defaultdict(dict)
# bad cache plugin is not fatal error
try:
self._fact_cache = FactCache()
except AnsibleError as e:
display.warning(to_native(e))
# fallback to a dict as in memory cache
self._fact_cache = {}
def __getstate__(self):
data = dict(
fact_cache=self._fact_cache,
np_fact_cache=self._nonpersistent_fact_cache,
vars_cache=self._vars_cache,
extra_vars=self._extra_vars,
host_vars_files=self._host_vars_files,
group_vars_files=self._group_vars_files,
omit_token=self._omit_token,
options_vars=self._options_vars,
inventory=self._inventory,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = data.get('inventory', None)
self._options_vars = data.get('options_vars', dict())
@property
def extra_vars(self):
''' ensures a clean copy of the extra_vars is made '''
return self._extra_vars.copy()
@extra_vars.setter
def extra_vars(self, value):
''' ensures a clean copy of the extra_vars is used to set the value '''
assert isinstance(value, MutableMapping), "the type of 'value' for extra_vars should be a MutableMapping, but is a %s" % type(value)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
self._inventory = inventory
@property
def options_vars(self):
''' ensures a clean copy of the options_vars is made '''
return self._options_vars.copy()
@options_vars.setter
def options_vars(self, value):
''' ensures a clean copy of the options_vars is used to set the value '''
assert isinstance(value, dict), "the type of 'value' for options_vars should be a dict, but is a %s" % type(value)
self._options_vars = value.copy()
def _preprocess_vars(self, a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [a]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
'''
display.debug("in VariableManager get_vars()")
all_vars = dict()
magic_variables = self._get_magic_variables(
play=play,
host=host,
task=task,
include_hostvars=include_hostvars,
include_delegate_to=include_delegate_to,
)
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
basedirs = []
if task:
# set basedirs
if C.PLAYBOOK_VARS_ROOT == 'all': # should be default
basedirs = task.get_search_path()
elif C.PLAYBOOK_VARS_ROOT == 'top': # only option pre 2.3
basedirs = [self._loader.get_basedir()]
elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'): # only option in 2.4.0
basedirs = [task.get_search_path()[0]]
else:
raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
# (v1) made sure each task had a copy of its roles default vars
if task._role is not None and (play or task.action == 'include_role'):
all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))
if host:
# THE 'all' group and the rest of groups for a host, used below
all_group = self._inventory.groups.get('all')
host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])
def _get_plugin_vars(plugin, path, entities):
data = {}
try:
data = plugin.get_vars(self._loader, path, entities)
except AttributeError:
try:
for entity in entities:
if isinstance(entity, Host):
data.update(plugin.get_host_vars(entity.name))
else:
data.update(plugin.get_group_vars(entity.name))
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
else:
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
return data
# internal functions that actually do the work
def _plugins_inventory(entities):
''' merges all entities by inventory source '''
data = {}
for inventory_dir in self._inventory._sources:
if ',' in inventory_dir: # skip host lists
continue
elif not os.path.isdir(inventory_dir): # always pass 'inventory directory'
inventory_dir = os.path.dirname(inventory_dir)
for plugin in vars_loader.all():
data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
return data
def _plugins_play(entities):
''' merges all entities adjacent to play '''
data = {}
for plugin in vars_loader.all():
for path in basedirs:
data = combine_vars(data, _get_plugin_vars(plugin, path, entities))
return data
# configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
def all_inventory():
return all_group.get_vars()
def all_plugins_inventory():
return _plugins_inventory([all_group])
def all_plugins_play():
return _plugins_play([all_group])
def groups_inventory():
''' gets group vars from inventory '''
return get_group_vars(host_groups)
def groups_plugins_inventory():
''' gets plugin sources from inventory for groups '''
return _plugins_inventory(host_groups)
def groups_plugins_play():
''' gets plugin sources from play for groups '''
return _plugins_play(host_groups)
def plugins_by_groups():
'''
merges all plugin sources by group.
This should be used instead of, NOT in combination with, the other groups_plugins* functions
'''
data = {}
for group in host_groups:
data[group] = combine_vars(data.get(group, {}), _plugins_inventory(group))
data[group] = combine_vars(data[group], _plugins_play(group))
return data
# Merge groups as per precedence config
# only allow to call the functions we want exposed
for entry in C.VARIABLE_PRECEDENCE:
if entry in self._ALLOWED:
display.debug('Calling %s to load vars for %s' % (entry, host.name))
all_vars = combine_vars(all_vars, locals()[entry]())
else:
display.warning('Ignoring unknown variable precedence entry: %s' % (entry))
# host vars, from inventory, inventory adjacent and play adjacent via plugins
all_vars = combine_vars(all_vars, host.get_vars())
all_vars = combine_vars(all_vars, _plugins_inventory([host]))
all_vars = combine_vars(all_vars, _plugins_play([host]))
# finally, the facts caches for this host, if it exists
try:
host_facts = wrap_var(self._fact_cache.get(host.name, {}))
# push facts to main namespace
all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=self._loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
vars_file_list = vars_file_item
if not isinstance(vars_file_list, list):
vars_file_list = [vars_file_list]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
try:
data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
if data is not None:
for item in data:
all_vars = combine_vars(all_vars, item)
break
except AnsibleFileNotFound:
# we continue on loader failures
continue
except AnsibleParserError:
raise
else:
# if include_delegate_to is set to False, we ignore the missing
# vars file here because we're working on a delegated host
if include_delegate_to:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item,
obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be because of that
# so just show a warning and continue
display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
continue
display.vvv("Read vars_file '%s'" % vars_file_item)
# By default, we now merge in all vars from all roles in the play,
# unless the user has disabled this via a config option
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars(include_params=False))
# next, we merge in the vars from the role, which will specifically
# follow the role dependency chain, and then we merge in the tasks
# vars (which will look at parent blocks/task includes)
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
all_vars = combine_vars(all_vars, task.get_vars())
# next, we merge in the vars cache (include vars) and nonpersistent
# facts cache (set_fact/register), in that order
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
# next, we merge in role params and task include params
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))
# special case for include tasks, where the include params
# may be specified in the vars field for the task, which should
# have higher precedence than the vars/np facts above
all_vars = combine_vars(all_vars, task.get_include_params())
# extra vars
all_vars = combine_vars(all_vars, self._extra_vars)
# magic variables
all_vars = combine_vars(all_vars, magic_variables)
# special case for the 'environment' magic variable, as someone
# may have set it as a variable and we don't want to stomp on it
if task:
all_vars['environment'] = task.environment
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
if task and task.delegate_to is not None and include_delegate_to:
all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)
# 'vars' magic var
if task or play:
# has to be copy, otherwise recursive ref
all_vars['vars'] = all_vars.copy()
display.debug("done with get_vars()")
return all_vars
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to):
'''
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
'''
variables = {}
variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
variables['ansible_playbook_python'] = sys.executable
if play:
variables['role_names'] = [r._role_name for r in play.roles]
if task:
if task._role:
variables['role_name'] = task._role.get_name()
variables['role_path'] = task._role._role_path
variables['role_uuid'] = text_type(task._role._uuid)
if self._inventory is not None:
variables['groups'] = self._inventory.get_groups_dict()
if play:
templar = Templar(loader=self._loader)
if templar.is_template(play.hosts):
pattern = 'all'
else:
pattern = play.hosts or 'all'
# add the list of hosts in the play, as adjusted for limit/filters
variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
# however this would take work in the templating engine, so for now we'll add both
variables['play_hosts'] = variables['ansible_play_batch']
# the 'omit' value allows params to be left out if the variable they are based on is undefined
variables['omit'] = self._omit_token
# Set options vars
for option, option_value in iteritems(self._options_vars):
variables[option] = option_value
if self._hostvars is not None and include_hostvars:
variables['hostvars'] = self._hostvars
return variables
def _get_delegated_vars(self, play, task, existing_variables):
# we unfortunately need to template the delegate_to field here,
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
vars_copy = existing_variables.copy()
templar = Templar(loader=self._loader, variables=vars_copy)
items = []
if task.loop is not None:
if task.loop in lookup_loader:
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar,
loader=self._loader, fail_on_undefined=True, convert_bare=False)
items = lookup_loader.get(task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
except AnsibleUndefinedVariable:
# This task will be skipped later due to this, so we just setup
# a dummy array for the later code so it doesn't fail
items = [None]
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
else:
items = [None]
delegated_host_vars = dict()
for item in items:
# update the variables with the item value for templating, in case we need it
if item is not None:
vars_copy['item'] = item
templar.set_available_variables(vars_copy)
delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
if delegated_host_name is None:
raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
if delegated_host_name in delegated_host_vars:
# no need to repeat ourselves, as the delegate_to value
# does not appear to be tied to the loop item variable
continue
# a dictionary of variables to use if we have to create a new host below
# we set the default port based on the default transport here, to make sure
# we use the proper default for windows
new_port = C.DEFAULT_REMOTE_PORT
if C.DEFAULT_TRANSPORT == 'winrm':
new_port = 5986
new_delegated_host_vars = dict(
ansible_delegated_host=delegated_host_name,
ansible_host=delegated_host_name, # not redundant as other sources can change ansible_host
ansible_port=new_port,
ansible_user=C.DEFAULT_REMOTE_USER,
ansible_connection=C.DEFAULT_TRANSPORT,
)
# now try to find the delegated-to host in inventory, or failing that,
# create a new host on the fly so we can fetch variables for it
delegated_host = None
if self._inventory is not None:
delegated_host = self._inventory.get_host(delegated_host_name)
# try looking it up based on the address field, and finally
# fall back to creating a host on the fly to use for the var lookup
if delegated_host is None:
if delegated_host_name in C.LOCALHOST:
delegated_host = self._inventory.localhost
else:
for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
# now we go fetch the vars for the delegated-to host and save them in our
# master dictionary of variables to be used later in the TaskExecutor/PlayContext
delegated_host_vars[delegated_host_name] = self.get_vars(
play=play,
host=delegated_host,
task=task,
include_delegate_to=False,
include_hostvars=False,
)
return delegated_host_vars
def clear_facts(self, hostname):
'''
Clears the facts for a host
'''
if hostname in self._fact_cache:
del self._fact_cache[hostname]
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict), "the type of 'facts' to set for host_facts should be a dict but is a %s" % type(facts)
if host.name not in self._fact_cache:
self._fact_cache[host.name] = facts
else:
try:
self._fact_cache.update(host.name, facts)
except KeyError:
self._fact_cache[host.name] = facts
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict), "the type of 'facts' to set for nonpersistent_facts should be a dict but is a %s" % type(facts)
if host.name not in self._nonpersistent_fact_cache:
self._nonpersistent_fact_cache[host.name] = facts
else:
try:
self._nonpersistent_fact_cache[host.name].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
host_name = host.get_name()
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
if varname in self._vars_cache[host_name] and isinstance(self._vars_cache[host_name][varname], MutableMapping) and isinstance(value, MutableMapping):
self._vars_cache[host_name] = combine_vars(self._vars_cache[host_name], {varname: value})
else:
self._vars_cache[host_name][varname] = value
| bsd-3-clause |
nderituedwin/django-user-accounts | account/utils.py | 11 | 3918 | from __future__ import unicode_literals
import functools
try:
from urllib.parse import urlparse, urlunparse
except ImportError: # python 2
from urlparse import urlparse, urlunparse
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponseRedirect, QueryDict
from django.contrib.auth import get_user_model
from account.conf import settings
def get_user_lookup_kwargs(kwargs):
result = {}
username_field = getattr(get_user_model(), "USERNAME_FIELD", "username")
for key, value in kwargs.items():
result[key.format(username=username_field)] = value
return result
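# A minimal sketch, assuming a custom user model whose USERNAME_FIELD is 'email':
#   get_user_lookup_kwargs({"{username}__iexact": "alice"})
#   -> {"email__iexact": "alice"}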
def default_redirect(request, fallback_url, **kwargs):
redirect_field_name = kwargs.get("redirect_field_name", "next")
next_url = request.POST.get(redirect_field_name, request.GET.get(redirect_field_name))
if not next_url:
# try the session if available
if hasattr(request, "session"):
session_key_value = kwargs.get("session_key_value", "redirect_to")
if session_key_value in request.session:
next_url = request.session[session_key_value]
del request.session[session_key_value]
is_safe = functools.partial(
ensure_safe_url,
allowed_protocols=kwargs.get("allowed_protocols"),
allowed_host=request.get_host()
)
if next_url and is_safe(next_url):
return next_url
else:
try:
fallback_url = urlresolvers.reverse(fallback_url)
except urlresolvers.NoReverseMatch:
if callable(fallback_url):
raise
if "/" not in fallback_url and "." not in fallback_url:
raise
# assert the fallback URL is safe to return to caller. if it is
# determined unsafe then raise an exception as the fallback value comes
# from a source the developer chose.
is_safe(fallback_url, raise_on_fail=True)
return fallback_url
def user_display(user):
return settings.ACCOUNT_USER_DISPLAY(user)
def ensure_safe_url(url, allowed_protocols=None, allowed_host=None, raise_on_fail=False):
if allowed_protocols is None:
allowed_protocols = ["http", "https"]
parsed = urlparse(url)
# perform security checks to ensure no malicious intent
# (i.e., an XSS attack with a data URL)
safe = True
if parsed.scheme and parsed.scheme not in allowed_protocols:
if raise_on_fail:
raise SuspiciousOperation("Unsafe redirect to URL with protocol '{0}'".format(parsed.scheme))
safe = False
if allowed_host and parsed.netloc and parsed.netloc != allowed_host:
if raise_on_fail:
raise SuspiciousOperation("Unsafe redirect to URL not matching host '{0}'".format(allowed_host))
safe = False
return safe
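# For instance (URLs assumed for illustration), with the defaults above:
#   ensure_safe_url("/next/page/")                                        -> True
#   ensure_safe_url("https://example.com/x", allowed_host="example.com")  -> True
#   ensure_safe_url("data:text/html;base64,xyz")                          -> False (disallowed protocol)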
def handle_redirect_to_login(request, **kwargs):
login_url = kwargs.get("login_url")
redirect_field_name = kwargs.get("redirect_field_name")
next_url = kwargs.get("next_url")
if login_url is None:
login_url = settings.ACCOUNT_LOGIN_URL
if next_url is None:
next_url = request.get_full_path()
try:
login_url = urlresolvers.reverse(login_url)
except urlresolvers.NoReverseMatch:
if callable(login_url):
raise
if "/" not in login_url and "." not in login_url:
raise
url_bits = list(urlparse(login_url))
if redirect_field_name:
querystring = QueryDict(url_bits[4], mutable=True)
querystring[redirect_field_name] = next_url
url_bits[4] = querystring.urlencode(safe="/")
return HttpResponseRedirect(urlunparse(url_bits))
def get_form_data(form, field_name, default=None):
if form.prefix:
key = "-".join([form.prefix, field_name])
else:
key = field_name
return form.data.get(key, default)
| mit |
chrislit/abydos | abydos/distance/_rouge_s.py | 1 | 2866 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._rouge_s.
Rouge-S similarity
"""
from typing import Any
from ._distance import _Distance
from ..tokenizer import QSkipgrams
from ..util._ncr import _ncr
__all__ = ['RougeS']
class RougeS(_Distance):
r"""Rouge-S similarity.
Rouge-S similarity :cite:`Lin:2004`, operating on character-level skipgrams
.. versionadded:: 0.4.0
"""
def __init__(self, qval: int = 2, **kwargs: Any) -> None:
"""Initialize RougeS instance.
Parameters
----------
**kwargs
Arbitrary keyword arguments
.. versionadded:: 0.4.0
"""
super(RougeS, self).__init__(**kwargs)
self._qval = qval
self._tokenizer = QSkipgrams(qval=qval, start_stop='')
def sim(self, src: str, tar: str, beta: float = 8) -> float:
"""Return the Rouge-S similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
beta : int or float
A weighting factor to prejudice similarity towards src
Returns
-------
float
Rouge-S similarity
Examples
--------
>>> cmp = RougeS()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.30185758513931893
>>> cmp.sim('aluminum', 'Catalan')
0.10755653612796467
>>> cmp.sim('ATCG', 'TAGC')
0.6666666666666666
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
qsg_src = self._tokenizer.tokenize(src).get_counter()
qsg_tar = self._tokenizer.tokenize(tar).get_counter()
intersection = sum((qsg_src & qsg_tar).values())
if intersection:
r_skip = intersection / _ncr(len(src), self._qval)
p_skip = intersection / _ncr(len(tar), self._qval)
else:
return 0.0
beta_sq = beta * beta
return (1 + beta_sq) * r_skip * p_skip / (r_skip + beta_sq * p_skip)
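# The return value is the weighted F-measure over skip-bigram recall and precision,
# F = (1 + beta^2) * R * P / (R + beta^2 * P). Worked example for sim('cat', 'hat')
# with qval=2: each word has C(3, 2) = 3 skip-bigrams and only 'at' is shared, so
# R = P = 1/3 and F = 1/3, matching the doctest above.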
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
Edraak/edx-platform | lms/envs/sauce.py | 67 | 2208 | """
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests on SauceLabs.
"""
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import os
PORTS = [
2000, 2001, 2020, 2109, 2222, 2310, 3000, 3001,
3030, 3210, 3333, 4000, 4001, 4040, 4321, 4502, 4503,
5050, 5555, 5432, 6060, 6666, 6543, 7000, 7070, 7774,
7777, 8003, 8031, 8080, 8081, 8765, 8888,
9080, 9090, 9876, 9999, 49221, 55001
]
DESIRED_CAPABILITIES = {
'chrome': DesiredCapabilities.CHROME,
'internetexplorer': DesiredCapabilities.INTERNETEXPLORER,
'firefox': DesiredCapabilities.FIREFOX,
'opera': DesiredCapabilities.OPERA,
'iphone': DesiredCapabilities.IPHONE,
'ipad': DesiredCapabilities.IPAD,
'safari': DesiredCapabilities.SAFARI,
'android': DesiredCapabilities.ANDROID
}
# All keys must be URL and JSON encodable
# PLATFORM-BROWSER-VERSION_NUM-DEVICE
ALL_CONFIG = {
'Linux-chrome--': ['Linux', 'chrome', '', ''],
'Windows 8-chrome--': ['Windows 8', 'chrome', '', ''],
'Windows 7-chrome--': ['Windows 7', 'chrome', '', ''],
'Windows XP-chrome--': ['Windows XP', 'chrome', '', ''],
'OS X 10.8-chrome--': ['OS X 10.8', 'chrome', '', ''],
'OS X 10.6-chrome--': ['OS X 10.6', 'chrome', '', ''],
'Linux-firefox-23-': ['Linux', 'firefox', '23', ''],
'Windows 8-firefox-23-': ['Windows 8', 'firefox', '23', ''],
'Windows 7-firefox-23-': ['Windows 7', 'firefox', '23', ''],
'Windows XP-firefox-23-': ['Windows XP', 'firefox', '23', ''],
'OS X 10.8-safari-6-': ['OS X 10.8', 'safari', '6', ''],
'Windows 8-internetexplorer-10-': ['Windows 8', 'internetexplorer', '10', ''],
}
SAUCE_INFO = ALL_CONFIG.get(os.environ.get('SAUCE_INFO', 'Linux-chrome--'))
# Information needed to utilize Sauce Labs.
SAUCE = {
'USERNAME': os.environ.get('SAUCE_USER_NAME'),
'ACCESS_ID': os.environ.get('SAUCE_API_KEY'),
'PLATFORM': SAUCE_INFO[0],
'BROWSER': DESIRED_CAPABILITIES.get(SAUCE_INFO[1]),
'VERSION': SAUCE_INFO[2],
'DEVICE': SAUCE_INFO[3],
'SESSION': 'Jenkins Acceptance Tests',
'BUILD': os.environ.get('BUILD_DISPLAY_NAME', 'LETTUCE TESTS'),
}
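# A minimal example of the mapping above, assuming SAUCE_INFO='Windows 7-firefox-23-':
#   SAUCE['PLATFORM'] == 'Windows 7'
#   SAUCE['BROWSER'] == DesiredCapabilities.FIREFOX
#   SAUCE['VERSION'] == '23'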
| agpl-3.0 |
susundberg/arduino-simple-unittest | src/tools/cpp/nonvirtual_dtors.py | 2 | 2101 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
from __future__ import print_function
from __future__ import unicode_literals
from . import ast
from . import metrics
__author__ = '[email protected] (Neal Norwitz)'
def _find_warnings(filename, source, ast_list):
count = 0
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if isinstance(node, ast.Class) and node.body:
_find_warnings(filename, source, [node])
elif (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print(
'%s:%d' % (
filename,
lines.get_line_number(
class_node.start)),
end=' ')
print("'{}' has virtual methods without a virtual "
'dtor'.format(class_node.name))
count += 1
return count
def run(filename, source, entire_ast, include_paths, quiet):
return _find_warnings(filename, source, entire_ast)
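# A sketch of the kind of C++ this flags (class name assumed for illustration):
#   class Widget {          // no base classes
#    public:
#     virtual void Draw();  // has a virtual method...
#     ~Widget();            // ...but a non-virtual destructor -> reported
#   };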
| mit |
UserXXX/CuBolt | cubolt/exceptions.py | 1 | 1551 | # The MIT License (MIT)
#
# Copyright (c) 2014-2015 Bjoern Lange
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of CuBolt.
"""Custom exceptions needed by CuBolt."""
class IndexBelowWorldException(Exception):
"""This exception is thrown if someone tries to set a block below
the 'a' index of a chunk. Cube World doesn't support this so CuBolt
prevents a user from doing this.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value) | mit |
starbucks2010/N915F_kernel_permissive | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
DarrenRainey/volatility | volatility/plugins/linux/flags.py | 58 | 1831 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
# flags used throughout the plugins
# these aren't going to change due to binary breakage if they would
# Protocol strings should use volatility.protos
tcp_states = ("",
"ESTABLISHED",
"SYN_SENT",
"SYN_RECV",
"FIN_WAIT1",
"FIN_WAIT2",
"TIME_WAIT",
"CLOSE",
"CLOSE_WAIT",
"LAST_ACK",
"LISTEN",
"CLOSING")
MNT_NOSUID = 0x01
MNT_NODEV = 0x02
MNT_NOEXEC = 0x04
MNT_NOATIME = 0x08
MNT_NODIRATIME = 0x10
MNT_RELATIME = 0x20
mnt_flags = {
MNT_NOSUID: ",nosuid",
MNT_NODEV: ",nodev",
MNT_NOEXEC: ",noexec",
MNT_NOATIME: ",noatime",
MNT_NODIRATIME: ",nodiratime",
MNT_RELATIME: ",relatime"
}
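# Illustration only: one way a caller could render these bit flags into a
# mount-option string (the ``flags`` value below is made up):
#
#   flags = MNT_NOSUID | MNT_NODEV | MNT_RELATIME
#   opts = "".join(s for f, s in sorted(mnt_flags.items()) if flags & f)
#   # opts == ",nosuid,nodev,relatime"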
S_IFMT = 0170000
S_IFSOCK = 0140000
S_IFLNK = 0120000
S_IFREG = 0100000
S_IFBLK = 0060000
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFIFO = 0010000
S_ISUID = 0004000
S_ISGID = 0002000
| gpl-2.0 |
cmtm/networkx | networkx/algorithms/reciprocity.py | 8 | 3042 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by
# Haochen Wu <[email protected]>
# All rights reserved.
# BSD license.
"""Algorithms to calculate reciprocity in a directed graph."""
from networkx import NetworkXError
from ..utils import not_implemented_for
__author__ = """\n""".join(['Haochen Wu <[email protected]>'])
__all__= ['reciprocity','overall_reciprocity']
@not_implemented_for('undirected','multigraph')
def reciprocity(G, nodes=None):
"""Compute the reciprocity in a directed graph.
The reciprocity of a directed graph is defined as the ratio
of the number of edges pointing in both directions to the total
number of edges in the graph.
Formally, :math:`r = |\{(u,v) \in G \mid (v,u) \in G\}| / |\{(u,v) \in G\}|`.
The reciprocity of a single node u is defined similarly,
it is the ratio of the number of edges in both directions to
the total number of edges attached to node u.
Parameters
----------
G : graph
A networkx directed graph
nodes : container of nodes, optional (default=whole graph)
Compute reciprocity for nodes in this container.
Returns
-------
out : dictionary
Reciprocity keyed by node label.
Notes
-----
The reciprocity is not defined for isolated nodes.
In such cases this function will return None.
"""
# If `nodes` is not specified, calculate the reciprocity of the graph.
if nodes is None:
return overall_reciprocity(G)
# If `nodes` represents a single node in the graph, return only its
# reciprocity.
if nodes in G:
reciprocity = next(_reciprocity_iter(G,nodes))[1]
if reciprocity is None:
raise NetworkXError('Not defined for isolated nodes.')
else:
return reciprocity
# Otherwise, `nodes` represents an iterable of nodes, so return a
# dictionary mapping node to its reciprocity.
return dict(_reciprocity_iter(G,nodes))
def _reciprocity_iter(G,nodes):
""" Return an iterator of (node, reciprocity).
"""
n = G.nbunch_iter(nodes)
for node in n:
pred = set(G.predecessors(node))
succ = set(G.successors(node))
overlap = pred & succ
n_total = len(pred) + len(succ)
# Reciprocity is not defined for isolated nodes.
# Return None.
if n_total == 0:
yield (node,None)
else:
reciprocity = 2.0*float(len(overlap))/float(n_total)
yield (node,reciprocity)
@not_implemented_for('undirected','multigraph')
def overall_reciprocity(G):
"""Compute the reciprocity for the whole graph.
See the doc of reciprocity for the definition.
Parameters
----------
G : graph
A networkx graph
"""
n_all_edge = G.number_of_edges()
n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) *2
if n_all_edge == 0:
raise NetworkXError("Not defined for empty graphs")
return float(n_overlap_edge)/float(n_all_edge)
| bsd-3-clause |
CT-Data-Collaborative/ctdata-mailchimp | ctdata_mailchimp/migrations/0004_auto__add_field_subscriptionplugin_assign_language.py | 2 | 2971 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubscriptionPlugin.assign_language'
db.add_column(u'cmsplugin_subscriptionplugin', 'assign_language',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SubscriptionPlugin.assign_language'
db.delete_column(u'cmsplugin_subscriptionplugin', 'assign_language')
models = {
u'aldryn_mailchimp.subscriptionplugin': {
'Meta': {'object_name': 'SubscriptionPlugin', '_ormbases': ['cms.CMSPlugin']},
'assign_language': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'list_id': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['aldryn_mailchimp'] | bsd-3-clause |
isb-cgc/ISB-CGC-Webapp | bq_data_access/v1/feature_search/mirna.py | 1 | 2416 | #
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import object
from MySQLdb.cursors import DictCursor
from _mysql_exceptions import MySQLError
from bq_data_access.v1.feature_search.common import BackendException, InvalidFieldException
from bq_data_access.v1.feature_search.common import FOUND_FEATURE_LIMIT
from bq_data_access.v1.mirna_data import build_feature_label, MIRN_FEATURE_TYPE
from cohorts.metadata_helpers import get_sql_connection
class MIRNSearcher(object):
search_fields = set(['mirna_name', 'platform', 'value_field'])
@classmethod
def get_table_name(cls):
return "feature_defs_mirna"
def validate_search_field(self, keyword, field):
if field not in self.search_fields:
raise InvalidFieldException("MIRN", keyword, field)
def search(self, keyword, field):
self.validate_search_field(keyword, field)
query = 'SELECT mirna_name, platform, value_field, internal_feature_id ' \
'FROM {table_name} WHERE {search_field} LIKE %s LIMIT %s'.format(
table_name=self.get_table_name(),
search_field=field
)
# Format the keyword for MySQL string matching
sql_keyword = '%' + keyword + '%'
query_args = [sql_keyword, FOUND_FEATURE_LIMIT]
try:
db = get_sql_connection()
cursor = db.cursor(DictCursor)
cursor.execute(query, tuple(query_args))
items = []
for row in cursor.fetchall():
items.append(row)
# Generate human readable labels
for item in items:
item['feature_type'] = MIRN_FEATURE_TYPE
item['label'] = build_feature_label(item)
return items
except MySQLError:
raise BackendException('database error', keyword, field)
| apache-2.0 |
adlai/p2pool | SOAPpy/NS.py | 289 | 3724 | from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman ([email protected])
# Brian Matthews ([email protected])
# Gregory Warnes ([email protected])
# Christopher Blunck ([email protected])
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: NS.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
d = {}
for k, v in dict.items():
d[v] = k
return d
class NS:
XML = "http://www.w3.org/XML/1998/namespace"
ENV = "http://schemas.xmlsoap.org/soap/envelope/"
ENC = "http://schemas.xmlsoap.org/soap/encoding/"
XSD = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_L = [XSD, XSD2, XSD3]
EXSD_L= [ENC, XSD, XSD2, XSD3]
XSI = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_L = [XSI, XSI2, XSI3]
URN = "http://soapinterop.org/xsd"
# For generated messages
XML_T = "xml"
ENV_T = "SOAP-ENV"
ENC_T = "SOAP-ENC"
XSD_T = "xsd"
XSD2_T= "xsd2"
XSD3_T= "xsd3"
XSI_T = "xsi"
XSI2_T= "xsi2"
XSI3_T= "xsi3"
URN_T = "urn"
NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
URN_T: URN}
NSMAP_R = invertDict(NSMAP)
STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
'2001': (XSD3_T, XSI3_T)}
STMAP_R = invertDict(STMAP)
def __init__(self):
raise TypeError("Don't instantiate this")
| gpl-3.0 |
xyuanmu/XX-Net | python3.8.2/Lib/encodings/cp1140.py | 272 | 13105 | """ Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1140',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\u20ac' # 0x9F -> EURO SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
hashworks/CouchPotatoServer | libs/tornado/log.py | 124 | 9469 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
try:
return _unicode(s)
except UnicodeDecodeError:
return repr(s)
class LogFormatter(logging.Formatter):
"""Log formatter used in Tornado.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
This formatter is enabled automatically by
`tornado.options.parse_command_line` (unless ``--logging=none`` is
used).
"""
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
}
def __init__(self, color=True, fmt=DEFAULT_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode_type(fg_color, "ascii")
for levelno, code in colors.items():
self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
else:
self._normal = ''
def format(self, record):
try:
message = record.getMessage()
assert isinstance(message, basestring_type) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ''
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
formatted = '\n'.join(lines)
return formatted.replace("\n", "\n ")
def enable_pretty_logging(options=None, logger=None):
"""Turns on formatted logging output as configured.
This is called automatically by `tornado.options.parse_command_line`
and `tornado.options.parse_config_file`.
"""
if options is None:
from tornado.options import options
if options.logging is None or options.logging.lower() == 'none':
return
if logger is None:
logger = logging.getLogger()
logger.setLevel(getattr(logging, options.logging.upper()))
if options.log_file_prefix:
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if (options.log_to_stderr or
(options.log_to_stderr is None and not logger.handlers)):
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
def define_logging_options(options=None):
if options is None:
# late import to prevent cycle
from tornado.options import options
options.define("logging", default="info",
help=("Set the Python log level. If 'none', tornado won't touch the "
"logging configuration."),
metavar="debug|info|warning|error|none")
options.define("log_to_stderr", type=bool, default=None,
help=("Send log output to stderr (colorized if possible). "
"By default use stderr if --log_file_prefix is not set and "
"no other logging is configured."))
options.define("log_file_prefix", type=str, default=None, metavar="PATH",
help=("Path prefix for log files. "
"Note that if you are running multiple tornado processes, "
"log_file_prefix must be different for each of them (e.g. "
"include the port number)"))
options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
help="max size of log files before rollover")
options.define("log_file_num_backups", type=int, default=10,
help="number of log files to keep")
options.add_parse_callback(enable_pretty_logging)
| gpl-3.0 |
fmaguire/ete | ete3/tools/phylobuild_lib/workflow/common.py | 1 | 25974 | from __future__ import absolute_import
from __future__ import print_function
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from collections import defaultdict
import logging
from ete3.tools.phylobuild_lib.utils import (DEBUG, GLOBALS, SeqGroup, tobool, sec2time, read_time_file,
_max, _min, _mean, _std, _median)
from ete3.tools.phylobuild_lib.apps import APP2CLASS
from ete3.tools.phylobuild_lib import task as all_tasks
from ete3.tools.phylobuild_lib import db
from ete3.tools.phylobuild_lib.errors import ConfigError, DataError, TaskError
from ete3.tools.phylobuild_lib.master_task import register_task_recursively, isjob
import six
from six.moves import range
log = logging.getLogger("main")
class IterConfig(dict):
def __init__(self, conf, wkname, size, seqtype):
"""Special dict to extract the value of each parameter given
the properties of a task: size and seqtype.
"""
dict.__init__(self, conf[wkname])
self.conf = conf
self.seqtype = seqtype
self.size = size
self['npr_wf_type'] = conf['_npr'].get('wf_type', None)
self['npr_workflows'] = conf['_npr'].get('workflows', [])
self['switch_aa_similarity'] = conf['_npr'].get('nt_switch_thr', 1.0)
if conf[wkname]["_app"] == self['npr_wf_type']:
self['max_iters'] = conf['_npr'].get('max_iters', 1) # 1 = no npr by default!
else:
self['max_iters'] = 1
self['_tree_splitter'] = '@default_tree_splitter'
# if max_outgroup size is 0, means that no rooting is done in child NPR trees
self['use_outgroup'] = conf['default_tree_splitter']['_max_outgroup_size'] != 0
def __getattr__(self, v):
try:
return dict.__getattr__(self, v)
except AttributeError:
return self.__getitem__(v)
def __getitem__(self, v):
# Automatically switch among nt and aa bindings
if v in set(["tree_builder", "aligner", "model_tester",
"alg_cleaner"]):
v = "%s_%s" %(self.seqtype, v)
try:
value = dict.__getitem__(self, "_%s" %v)
except KeyError as e:
return dict.__getitem__(self, v)
else:
# If list, let's take the correct element
if type(value) == list:
raise ValueError('This should not occur. Please report the error!')
if type(value) != str:
return value
elif value.lower() == "none":
return None, None
elif value.startswith("@"):
classname = APP2CLASS[self.conf[value[1:]]["_app"]]
return value[1:], getattr(all_tasks, classname)
else:
return value
def process_new_tasks(task, new_tasks, conf):
# Basic registration and processing of newly generated tasks
parent_taskid = task.taskid if task else None
for ts in new_tasks:
log.log(22, "Registering new task: %s", ts)
register_task_recursively(ts, parentid=parent_taskid)
conf["_nodeinfo"][ts.nodeid].setdefault("tasks", []).append(ts)
# sort task by nodeid
#GLOBALS["nodeinfo"][ts.nodeid].setdefault("tasks", []).append(ts)
if task:
# Clone processor, in case tasks belong to a side workflow
ts.task_processor = task.task_processor
ts.configid = task.configid
ts.threadid = task.threadid
ts.main_tree = task.main_tree
# NPR allows switching the workflow associated to new tasks, if so,
# child task should have a target_wkname attribute already,
# otherwise we assume the same parent workflow
if not hasattr(ts, "target_wkname"):
ts.target_wkname = task.target_wkname
#db.add_runid2task(ts.threadid, ts.taskid)
def inc_iternumber(threadid):
current_iter = get_iternumber(threadid)
GLOBALS["threadinfo"][threadid]["last_iter"] = current_iter + 1
return current_iter + 1
def get_iternumber(threadid):
return GLOBALS["threadinfo"][threadid].setdefault("last_iter", 1)
def get_identity(fname):
s = SeqGroup(fname)
seqlen = len(six.itervalues(s.id2seq))
ident = list()
for i in range(seqlen):
states = defaultdict(int)
for seq in six.itervalues(s.id2seq):
if seq[i] != "-":
states[seq[i]] += 1
values = list(states.values())
if values:
ident.append(float(max(values))/sum(values))
return (_max(ident), _min(ident),
_mean(ident), _std(ident))
def get_seqs_identity(alg, seqs):
''' Returns alg statistics regarding a set of sequences'''
seqlen = len(alg.get_seq(seqs[0]))
ident = list()
for i in range(seqlen):
states = defaultdict(int)
for seq_id in seqs:
seq = alg.get_seq(seq_id)
if seq[i] != "-":
states[seq[i]] += 1
values = list(states.values())
if values:
ident.append(float(max(values))/sum(values))
return (_max(ident), _min(ident),
_mean(ident), _std(ident))
def split_tree(task_tree_node, task_outgroups, main_tree, alg_path, npr_conf, threadid, target_cladeids):
"""Browses a task tree from root to leaves and yields next
suitable nodes for NPR iterations. Each yielded node comes with
the set of target and outgroup tips.
"""
def processable_node(_n):
"""This an internal function that returns true if a given node
is suitable for a NPR iteration. It can be used as
"is_leaf_fn" when traversing a tree.
Note that this function uses several variables which change within the
split_tree function, so must be kept within its namespace.
"""
is_leaf = False
for wkname, wkfilter in npr_conf.npr_workflows:
# if node is not in the targets or does not meet size filters, skip
# workflow
if _n is master_node or \
(_TARGET_NODES and _n not in _TARGET_NODES) or \
(target_cladeids and _n.cladeid not in target_cladeids) or \
len(n2content[_n]) < max(wkfilter.get("min_size", 3), 3) or \
("max_size" in wkfilter and len(n2content[_n]) > wkfilter["max_size"]):
continue
# If seq_sim filter used, calculate node stats
if ALG and ("min_seq_sim" in wkfilter or "max_seq_sim" in wkfilter):
if not hasattr(_n, "seqs_mean_ident"):
log.log(20, "Calculating node sequence stats...")
mx, mn, avg, std = get_seqs_identity(ALG,
[__n.name for __n in n2content[_n]])
_n.add_features(seqs_max_ident=mx, seqs_min_ident=mn,
seqs_mean_ident=avg, seqs_std_ident=std)
log.log(20, "mx=%s, mn=%s, avg=%s, std=%s" %(mx, mn, avg, std))
if _n.seqs_mean_ident < wkfilter["min_seq_sim"]:
continue
if _n.seqs_mean_ident > wkfilter["max_seq_sim"]:
continue
else:
_n.add_features(seqs_max_ident=None, seqs_min_ident=None,
seqs_mean_ident=None, seqs_std_ident=None)
if "min_support" in wkfilter:
# If we are optimizing only lowly supported nodes, and nodes are
# optimized without an outgroup, our target node is actually the
# parent of lowly supported nodes. Therefore, I check if support
# is low in children nodes, and return this node if so.
if not npr_conf.use_outgroup:
if not [_ch for _ch in _n.children if _ch.support <= wkfilter["min_support"]]:
continue
# Otherwise, just skip the node if it above the min support
elif _n.support > wkfilter["min_support"]:
continue
# At this point, node passed all filters of this workflow were met,
# so it can be optimized
is_leaf = True
_n._target_wkname = wkname
break
return is_leaf
log.log(20, "Loading tree content...")
n2content = main_tree.get_cached_content()
if alg_path:
log.log(20, "Loading associated alignment to check seq. similarity")
raw_alg = db.get_task_data(*alg_path.split("."))
ALG = SeqGroup(raw_alg)
else:
ALG = None
log.log(20, "Finding next NPR nodes...")
# task_tree_node is actually a node in main_tree, since it has been
# already merged
trees_to_browse = [task_tree_node]
npr_nodes = 0
# loads current tree content, so we can check not reconstructing exactly the
# same tree
tasktree_content = set([leaf.name for leaf in n2content[task_tree_node]]) | set(task_outgroups)
while trees_to_browse:
master_node = trees_to_browse.pop()
# if custom taxa levels are defined as targets, find them in this
# subtree
_TARGET_NODES = defaultdict(list) # this container is used by
# processable_node function
opt_levels = GLOBALS[threadid].get('_optimized_levels', None)
if opt_levels is not None:
# any descendant of the already processed node is suitable for
# selection. If the ancestor of level-species is on top of the
# task_tree_node, it will be discarded
avail_nodes = set(master_node.get_descendants())
for lin in opt_levels:
sp2lin, lin2sp = GLOBALS["lineages"]
optimized, strict_monophyly = opt_levels[lin]
if not optimized:
ancestor = main_tree.get_common_ancestor(*lin2sp[lin])
if ancestor in avail_nodes:
# check that the node satisfies level monophyly config
ancestor_content = set([x.name for x in n2content[ancestor]])
if not strict_monophyly or lin2sp[lin] == ancestor_content:
_TARGET_NODES[ancestor].append(lin)
elif strict_monophyly:
log.log(26, "Discarding not monophyletic level @@11:%s@@1:" %lin)
else:
log.log(26, "Discarding upper clade @@11:%s@@1:" %lin)
for node in master_node.iter_leaves(is_leaf_fn=processable_node):
if opt_levels:
log.log(28, "Trying to optimizing custom tree level: @@11:%s@@1:" %_TARGET_NODES[node])
for lin in _TARGET_NODES[node]:
# Marks the level as optimized, so is not computed again
opt_levels[lin][0] = True
log.log(28, "Found possible target node of size %s branch support %f" %(len(n2content[node]), node.support))
log.log(28, "First suitable workflow: %s" %(node._target_wkname))
# Finds best outgroup for the target node
if npr_conf.use_outgroup:
splitterconfname, _ = npr_conf.tree_splitter
splitterconf = GLOBALS[threadid][splitterconfname]
#seqs, outs = select_outgroups(node, n2content, splitterconf)
#seqs, outs = select_closest_outgroup(node, n2content, splitterconf)
seqs, outs = select_sister_outgroup(node, n2content, splitterconf)
else:
seqs = set([_i.name for _i in n2content[node]])
outs = set()
if seqs | outs == tasktree_content:
log.log(26, "Discarding target node of size %s, due to identity with its parent node" %len(n2content[node]))
#print tasktree_content
#print seqs
#print outs
trees_to_browse.append(node)
else:
npr_nodes += 1
yield node, seqs, outs, node._target_wkname
log.log(28, "%s nodes will be optimized", npr_nodes)
def get_next_npr_node(threadid, ttree, task_outgroups, mtree, alg_path, npr_conf, target_cladeids=None):
current_iter = get_iternumber(threadid)
if npr_conf.max_iters and current_iter >= npr_conf.max_iters:
log.warning("Maximum number of iterations reached!")
return
if not npr_conf.npr_workflows:
log.log(26, "NPR is disabled")
return
for node, seqs, outs, wkname in split_tree(ttree, task_outgroups, mtree, alg_path,
npr_conf, threadid, target_cladeids):
if npr_conf.max_iters and current_iter < npr_conf.max_iters:
log.log(28,
"@@16:Target node of size %s with %s outgroups marked for a new NPR iteration!@@1:" %(
len(seqs),
len(outs)))
# Yield new iteration
inc_iternumber(threadid)
yield node, seqs, outs, wkname
def select_closest_outgroup(target, n2content, splitterconf):
def sort_outgroups(x,y):
r = cmp(x[1], y[1]) # closer node
if r == 0:
r = -1 * cmp(len(n2content[x[0]]), len(n2content[y[0]])) # larger node
if r == 0:
r = -1 * cmp(x[0].support, y[0].support) # higher supported node
if r == 0:
return cmp(x[0].cladeid, y[0].cladeid) # by content name
else:
return r
else:
return r
else:
return r
if not target.up:
raise TaskError(None, "Cannot select outgroups for the root node!")
# Prepare cutoffs
out_topodist = tobool(splitterconf["_outgroup_topology_dist"])
max_outgroup_size = max(int(float(splitterconf["_max_outgroup_size"]) * len(n2content[target])), 1)
out_min_support = float(splitterconf["_min_outgroup_support"])
log.log(26, "Max outgroup size allowed %d" %max_outgroup_size)
# Gets a list of outside nodes and their distance to current target node
n2targetdist = distance_matrix_new(target, leaf_only=False,
topology_only=out_topodist)
valid_nodes = sorted([(node, ndist) for node, ndist in six.iteritems(n2targetdist)
if not(n2content[node] & n2content[target])
and node.support >= out_min_support
and len(n2content[node])<=max_outgroup_size],
sort_outgroups)
if valid_nodes:
best_outgroup = valid_nodes[0][0]
else:
print('\n'.join(sorted(["%s Size:%d Dist:%f Supp:%f" %(node.cladeid, len(n2content[node]), ndist, node.support)
for node, ndist in six.iteritems(n2targetdist)],
sort_outgroups)))
raise TaskError(None, "Could not find a suitable outgroup!")
log.log(20,
"Found possible outgroup Size:%d Distance:%f Support:%f",
len(n2content[best_outgroup]), n2targetdist[best_outgroup], best_outgroup.support)
log.log(20, "Supports: %0.2f (children=%s)", best_outgroup.support,
','.join(["%0.2f" % ch.support for ch in
best_outgroup.children]))
log.log(24, "best outgroup topology:\n%s", best_outgroup)
#print target
#print target.get_tree_root()
seqs = [n.name for n in n2content[target]]
outs = [n.name for n in n2content[best_outgroup]]
return set(seqs), set(outs)
def select_sister_outgroup(target, n2content, splitterconf):
def sort_outgroups(x,y):
r = cmp(x[1], y[1]) # closer node
if r == 0:
r = -1 * cmp(len(n2content[x[0]]), len(n2content[y[0]])) # larger node
if r == 0:
r = -1 * cmp(x[0].support, y[0].support) # higher supported node
if r == 0:
return cmp(x[0].cladeid, y[0].cladeid) # by content name
else:
return r
else:
return r
else:
return r
if not target.up:
raise TaskError(None, "Cannot select outgroups for the root node!")
# Prepare cutoffs
out_topodist = tobool(splitterconf["_outgroup_topology_dist"])
out_min_support = float(splitterconf["_min_outgroup_support"])
if splitterconf["_max_outgroup_size"].strip().endswith("%"):
max_outgroup_size = max(1, round((float(splitterconf["_max_outgroup_size"].strip("%"))/100) * len(n2content[target])))
log.log(26, "Max outgroup size allowed %s = %d" %(splitterconf["_max_outgroup_size"], max_outgroup_size))
else:
max_outgroup_size = max(1, int(splitterconf["_max_outgroup_size"]))
log.log(26, "Max outgroup size allowed %d" %max_outgroup_size)
# Gets a list of outside nodes and their distance to current target node
n2targetdist = distance_matrix_new(target, leaf_only=False,
topology_only=out_topodist)
sister_content = n2content[target.get_sisters()[0]]
valid_nodes = sorted([(node, ndist) for node, ndist in six.iteritems(n2targetdist)
if not(n2content[node] & n2content[target])
and n2content[node].issubset(sister_content)
and node.support >= out_min_support
and len(n2content[node])<=max_outgroup_size],
sort_outgroups)
if valid_nodes:
best_outgroup = valid_nodes[0][0]
else:
print('\n'.join(sorted(["%s Size:%d Distance:%f Support:%f" %(node.cladeid, len(n2content[node]), ndist, node.support)
for node, ndist in six.iteritems(n2targetdist)],
sort_outgroups)))
raise TaskError(None, "Could not find a suitable outgroup!")
log.log(20,
"Found possible outgroup Size:%d Dist:%f Supp:%f",
len(n2content[best_outgroup]), n2targetdist[best_outgroup], best_outgroup.support)
log.log(20, "Supports: %0.2f (children=%s)", best_outgroup.support,
','.join(["%0.2f" % ch.support for ch in
best_outgroup.children]))
log.log(24, "best outgroup topology:\n%s", best_outgroup)
#print target
#print target.get_tree_root()
seqs = [n.name for n in n2content[target]]
outs = [n.name for n in n2content[best_outgroup]]
return set(seqs), set(outs)
def select_outgroups(target, n2content, splitterconf):
"""Given a set of target sequences, find the best set of out
sequences to use. Several ways can be selected to find out
sequences:
"""
name2dist = {"min": _min, "max": _max,
"mean":_mean, "median":_median}
#policy = splitterconf["_outgroup_policy"] # node or leaves
out_topodist = tobool(splitterconf["_outgroup_topology_dist"])
optimal_out_size = int(splitterconf["_max_outgroup_size"])
#out_distfn = splitterconf["_outgroup_dist"]
out_min_support = float(splitterconf["_outgroup_min_support"])
if not target.up:
raise TaskError(None, "Cannot select outgroups for the root node!")
if not optimal_out_size:
raise TaskError(None, "You are trying to set 0 outgroups!")
# Gets a list of outside nodes and their distance to the current target node
n2targetdist = distance_matrix_new(target, leaf_only=False,
topology_only=out_topodist)
#kk, test = distance_matrix(target, leaf_only=False,
# topology_only=False)
#for x in test:
# if test[x] != n2targetdist[x]:
# print x
# print test[x], n2targetdist[x]
# print x.get_distance(target)
# raw_input("ERROR!")
score = lambda _n: (_n.support,
#len(n2content[_n])/float(optimal_out_size),
1 - (abs(optimal_out_size - len(n2content[_n])) / float(max(optimal_out_size, len(n2content[_n])))), # outgroup size
1 - (n2targetdist[_n] / max_dist) #outgroup proximity to target
)
def sort_outgroups(x,y):
score_x = set(score(x))
score_y = set(score(y))
while score_x:
min_score_x = min(score_x)
v = cmp(min_score_x, min(score_y))
if v == 0:
score_x.discard(min_score_x)
score_y.discard(min_score_x)
else:
break
# If still equal, sort by cladeid to maintain reproducibility
if v == 0:
v = cmp(x.cladeid, y.cladeid)
return v
#del n2targetdist[target.get_tree_root()]
max_dist = max(n2targetdist.values())
valid_nodes = [n for n in n2targetdist if \
not n2content[n] & n2content[target] and
n.support >= out_min_support]
if not valid_nodes:
raise TaskError(None, "Could not find a suitable outgroup (min_support=%s)"\
%out_min_support)
valid_nodes.sort(sort_outgroups, reverse=True)
best_outgroup = valid_nodes[0]
seqs = [n.name for n in n2content[target]]
outs = [n.name for n in n2content[best_outgroup]]
log.log(20,
"Found possible outgroup of size %s: score (support,size,dist)=%s",
len(outs), score(best_outgroup))
log.log(20, "Supports: %0.2f (children=%s)", best_outgroup.support,
','.join(["%0.2f" % ch.support for ch in
best_outgroup.children]))
if DEBUG():
root = target.get_tree_root()
for _seq in outs:
tar = root & _seq
tar.img_style["fgcolor"]="green"
tar.img_style["size"] = 12
tar.img_style["shape"] = "circle"
target.img_style["bgcolor"] = "lightblue"
NPR_TREE_STYLE.title.clear()
NPR_TREE_STYLE.title.add_face( faces.TextFace("MainTree:"
" Outgroup selection is mark in green. Red=optimized nodes ",
fgcolor="blue"), 0)
root.show(tree_style=NPR_TREE_STYLE)
for _n in root.traverse():
_n.img_style = None
return set(seqs), set(outs)
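# --- Illustrative sketch (not part of the pipeline) --------------------------
# A minimal, self-contained example of the scoring idea used by
# select_outgroups() above: every candidate gets a (support, size-fit,
# proximity) tuple and higher tuples win. The candidate values below are made
# up, and plain lexicographic sorting is used here only to illustrate the
# idea; the real sort_outgroups() compares the sorted components of each
# score tuple instead.
def _example_outgroup_ranking():
    optimal_out_size = 5
    # (name, branch support, clade size, distance to target) -- hypothetical
    candidates = [("A", 0.99, 12, 0.50),
                  ("B", 0.90, 5, 0.20),
                  ("C", 0.99, 4, 0.80)]
    max_dist = max(c[3] for c in candidates)
    def _score(support, size, dist):
        size_fit = 1 - (abs(optimal_out_size - size) /
                        float(max(optimal_out_size, size)))
        proximity = 1 - (dist / max_dist)
        return (support, size_fit, proximity)
    ranked = sorted(candidates, key=lambda c: _score(c[1], c[2], c[3]),
                    reverse=True)
    return [c[0] for c in ranked]  # -> ['C', 'A', 'B']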
def distance_matrix_new(target, leaf_only=False, topology_only=False):
t = target.get_tree_root()
real_outgroup = t.children[0]
t.set_outgroup(target)
n2dist = {target:0}
for n in target.get_descendants("preorder"):
n2dist[n] = n2dist[n.up] + (topology_only or n.dist)
sister = target.get_sisters()[0]
n2dist[sister] = (topology_only or sister.dist)+ (topology_only or target.dist)
for n in sister.get_descendants("preorder"):
n2dist[n] = n2dist[n.up] + (topology_only or n.dist)
t.set_outgroup(real_outgroup)
## Slow Test.
# for n in t.get_descendants():
# if float(str(target.get_distance(n))) != float(str(n2dist[n])):
# print n
# print target.get_distance(n), n2dist[n]
# raw_input("ERROR")
return n2dist
def assembly_tree(runid):
task_nodes = db.get_runid_nodes(runid)
task_nodes.reverse()
main_tree = None
iternumber = 1
while task_nodes:
cladeid, packtree, size = task_nodes.pop(-1)
if not packtree:
continue
tree = db.decode(packtree)
# print tree.dist
# Restore original gene names
for leaf in tree.iter_leaves():
leaf.add_features(safename=leaf.name)
leaf.name = leaf.realname
if main_tree:
# replace the node in the main tree with the optimized subtree
target_node = main_tree.search_nodes(cladeid=cladeid)[0]
target_node.up.add_child(tree)
target_node.detach()
else:
main_tree = tree
iter_name = "Iter_%04d_%dseqs" %(iternumber, size)
tree.add_features(iternumber=iternumber)
iternumber += 1
return main_tree, iternumber
def get_cmd_log(task):
cmd_lines = []
if getattr(task, 'get_launch_cmd', None):
launch_cmd = task.get_launch_cmd()
tm_s, tm_e = read_time_file(task.time_file)
cmd_lines.append([task.jobid, sec2time(tm_e - tm_s), task.jobname, launch_cmd])
if getattr(task, 'jobs', None):
for subtask in task.jobs:
cmd_lines.extend(get_cmd_log(subtask))
return cmd_lines
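# --- Illustrative sketch (hypothetical objects, not the pipeline classes) ----
# get_cmd_log() above flattens a task and all of its sub-jobs into a list of
# launch commands. A stripped-down version of the same recursion, without the
# timing information read from the time files:
class _FakeJob(object):
    def __init__(self, jobid, jobname, cmd, jobs=None):
        self.jobid = jobid
        self.jobname = jobname
        self._cmd = cmd
        self.jobs = jobs or []
    def get_launch_cmd(self):
        return self._cmd

def _collect_cmds(task):
    lines = [[task.jobid, task.jobname, task.get_launch_cmd()]]
    for subtask in task.jobs:
        lines.extend(_collect_cmds(subtask))
    return lines

# _collect_cmds(_FakeJob("j1", "align", "mafft seqs.fa",
#                        jobs=[_FakeJob("j2", "trim", "trimal -in aln.fa")]))
# -> [['j1', 'align', 'mafft seqs.fa'], ['j2', 'trim', 'trimal -in aln.fa']]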
| gpl-3.0 |
bkeepers/pygments.rb | vendor/pygments-main/external/moin-parser.py | 118 | 3600 | # -*- coding: utf-8 -*-
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a MoinMoin parser plugin that renders source code to HTML via
Pygments; you need Pygments 0.7 or newer for this parser to work.
To use it, set the options below to match your setup and put this file in
the data/plugin/parser subdirectory of your Moin instance, and give it the
name that the parser directive should have. For example, if you name the
file ``code.py``, you can get a highlighted Python code sample with this
Wiki markup::
{{{
#!code python
[...]
}}}
Additionally, if you set ATTACHMENTS below to True, Pygments will also be
called for all attachments for whose filenames there is no other parser
registered.
You are responsible for including CSS rules that will map the Pygments CSS
classes to colors. You can output a stylesheet file with `pygmentize`, put
it into the `htdocs` directory of your Moin instance and then include it in
the `stylesheets` configuration option in the Moin config, e.g.::
stylesheets = [('screen', '/htdocs/pygments.css')]
If you do not want to do that and are willing to accept larger HTML
output, you can set the INLINESTYLES option below to True.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want to highlight attachments, in addition to
# {{{ }}} blocks.
ATTACHMENTS = True
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from pygments.util import ClassNotFound
# wrap lines in <span>s so that the Moin-generated line numbers work
class MoinHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
for line in source:
yield 1, '<span class="line">' + line[1] + '</span>'
htmlformatter = MoinHtmlFormatter(noclasses=INLINESTYLES)
textlexer = TextLexer()
codeid = [0]
class Parser:
"""
MoinMoin Pygments parser.
"""
if ATTACHMENTS:
extensions = '*'
else:
extensions = []
Dependencies = []
def __init__(self, raw, request, **kw):
self.raw = raw
self.req = request
if "format_args" in kw:
# called from a {{{ }}} block
try:
self.lexer = get_lexer_by_name(kw['format_args'].strip())
except ClassNotFound:
self.lexer = textlexer
return
if "filename" in kw:
# called for an attachment
filename = kw['filename']
else:
# called for an attachment by an older moin
# HACK: find out the filename by peeking into the execution
# frame which might not always work
try:
frame = sys._getframe(1)
filename = frame.f_locals['filename']
except:
filename = 'x.txt'
try:
self.lexer = get_lexer_for_filename(filename)
except ClassNotFound:
self.lexer = textlexer
def format(self, formatter):
codeid[0] += 1
id = "pygments_%s" % codeid[0]
w = self.req.write
w(formatter.code_area(1, id, start=1, step=1))
w(formatter.rawHTML(highlight(self.raw, self.lexer, htmlformatter)))
w(formatter.code_area(0, id))
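# --- Illustrative sketch (not used by MoinMoin) -------------------------------
# A tiny helper showing what the wrapping formatter above produces: every
# highlighted line ends up inside <span class="line">...</span>, which is the
# hook Moin's own line numbering relies on. The sample code string is
# arbitrary; only names already imported in this module are used.
def _example_highlight(code="print('hello world')"):
    return highlight(code, get_lexer_by_name('python'), htmlformatter)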
| mit |
40223119/2015cda | static/Brython3.1.1-20150328-091302/Lib/html/entities.py | 814 | 75240 | """HTML character entity references."""
# maps the HTML entity name to the Unicode codepoint
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': 0x0391, # greek capital letter alpha, U+0391
'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': 0x0392, # greek capital letter beta, U+0392
'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': 0x03a7, # greek capital letter chi, U+03A7
'Dagger': 0x2021, # double dagger, U+2021 ISOpub
'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
'Eta': 0x0397, # greek capital letter eta, U+0397
'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
'Iota': 0x0399, # greek capital letter iota, U+0399
'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': 0x039a, # greek capital letter kappa, U+039A
'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
'Mu': 0x039c, # greek capital letter mu, U+039C
'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': 0x039d, # greek capital letter nu, U+039D
'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': 0x039f, # greek capital letter omicron, U+039F
'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
'Rho': 0x03a1, # greek capital letter rho, U+03A1
'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
'Tau': 0x03a4, # greek capital letter tau, U+03A4
'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': 0x0396, # greek capital letter zeta, U+0396
'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
'amp': 0x0026, # ampersand, U+0026 ISOnum
'and': 0x2227, # logical and = wedge, U+2227 ISOtech
'ang': 0x2220, # angle, U+2220 ISOamso
'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
'cap': 0x2229, # intersection = cap, U+2229 ISOtech
'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': 0x00a2, # cent sign, U+00A2 ISOnum
'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
'cong': 0x2245, # approximately equal to, U+2245 ISOtech
'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': 0x222a, # union = cup, U+222A ISOtech
'curren': 0x00a4, # currency sign, U+00A4 ISOnum
'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
'dagger': 0x2020, # dagger, U+2020 ISOpub
'darr': 0x2193, # downwards arrow, U+2193 ISOnum
'deg': 0x00b0, # degree sign, U+00B0 ISOnum
'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
'diams': 0x2666, # black diamond suit, U+2666 ISOpub
'divide': 0x00f7, # division sign, U+00F7 ISOnum
'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
'emsp': 0x2003, # em space, U+2003 ISOpub
'ensp': 0x2002, # en space, U+2002 ISOpub
'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': 0x2261, # identical to, U+2261 ISOtech
'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': 0x20ac, # euro sign, U+20AC NEW
'exist': 0x2203, # there exists, U+2203 ISOtech
'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
'forall': 0x2200, # for all, U+2200 ISOtech
'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': 0x2044, # fraction slash, U+2044 NEW
'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
'gt': 0x003e, # greater-than sign, U+003E ISOnum
'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
'harr': 0x2194, # left right arrow, U+2194 ISOamsa
'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': 0x221e, # infinity, U+221E ISOtech
'int': 0x222b, # integral, U+222B ISOtech
'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
'isin': 0x2208, # element of, U+2208 ISOtech
'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
'le': 0x2264, # less-than or equal to, U+2264 ISOtech
'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
'loz': 0x25ca, # lozenge, U+25CA ISOpub
'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
'lt': 0x003c, # less-than sign, U+003C ISOnum
'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': 0x2014, # em dash, U+2014 ISOpub
'micro': 0x00b5, # micro sign, U+00B5 ISOnum
'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': 0x2212, # minus sign, U+2212 ISOtech
'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': 0x2013, # en dash, U+2013 ISOpub
'ne': 0x2260, # not equal to, U+2260 ISOtech
'ni': 0x220b, # contains as member, U+220B ISOtech
'not': 0x00ac, # not sign, U+00AC ISOnum
'notin': 0x2209, # not an element of, U+2209 ISOtech
'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
'oline': 0x203e, # overline = spacing overscore, U+203E NEW
'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
'or': 0x2228, # logical or = vee, U+2228 ISOtech
'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': 0x2202, # partial differential, U+2202 ISOtech
'permil': 0x2030, # per mille sign, U+2030 ISOtech
'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': 0x00a3, # pound sign, U+00A3 ISOnum
'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
'prop': 0x221d, # proportional to, U+221D ISOtech
'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
'radic': 0x221a, # square root = radical sign, U+221A ISOtech
'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': 0x230b, # right floor, U+230B ISOamsc
'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
'sect': 0x00a7, # section sign, U+00A7 ISOnum
'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
'spades': 0x2660, # black spade suit, U+2660 ISOpub
'sub': 0x2282, # subset of, U+2282 ISOtech
'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
'sum': 0x2211, # n-ary summation, U+2211 ISOamsb
'sup': 0x2283, # superset of, U+2283 ISOtech
'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
'there4': 0x2234, # therefore, U+2234 ISOtech
'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
'thinsp': 0x2009, # thin space, U+2009 ISOpub
'thorn': 0x00fe, # latin small letter thorn, U+00FE ISOlat1
'tilde': 0x02dc, # small tilde, U+02DC ISOdia
'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
'trade': 0x2122, # trade mark sign, U+2122 ISOnum
'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
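# A tiny usage sketch (illustrative only): turn an entity name into the
# character it denotes, using the mapping defined above.
assert chr(name2codepoint['eacute']) == '\xe9'   # 'é'
assert name2codepoint['amp'] == 0x0026           # '&'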
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {}
for (name, codepoint) in name2codepoint.items():
codepoint2name[codepoint] = name
entitydefs[name] = chr(codepoint)
del name, codepoint
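# Illustrative sketch (not part of the original module): once the loop above
# has run, the two derived maps can be used to translate in either direction,
# e.g.
#
#     >>> entitydefs['ouml']        # entity name -> character
#     '\xf6'
#     >>> codepoint2name[0xf6]      # codepoint -> entity name
#     'ouml'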
| gpl-3.0 |
arifsetiawan/edx-platform | cms/djangoapps/contentstore/features/course-outline.py | 24 | 4642 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from common import *
from nose.tools import assert_true, assert_false # pylint: disable=no-name-in-module
from logging import getLogger
logger = getLogger(__name__)
@step(u'I have a course with no sections$')
def have_a_course(step):
world.clear_courses()
course = world.CourseFactory.create()
@step(u'I have a course with 1 section$')
def have_a_course_with_1_section(step):
world.clear_courses()
course = world.CourseFactory.create()
section = world.ItemFactory.create(parent_location=course.location)
subsection1 = world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',)
@step(u'I have a course with multiple sections$')
def have_a_course_with_two_sections(step):
world.clear_courses()
course = world.CourseFactory.create()
section = world.ItemFactory.create(parent_location=course.location)
subsection1 = world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',)
section2 = world.ItemFactory.create(
parent_location=course.location,
display_name='Section Two',)
subsection2 = world.ItemFactory.create(
parent_location=section2.location,
category='sequential',
display_name='Subsection Alpha',)
subsection3 = world.ItemFactory.create(
parent_location=section2.location,
category='sequential',
display_name='Subsection Beta',)
@step(u'I navigate to the course outline page$')
def navigate_to_the_course_outline_page(step):
create_studio_user(is_staff=True)
log_into_studio()
course_locator = 'a.course-link'
world.css_click(course_locator)
@step(u'I navigate to the outline page of a course with multiple sections')
def nav_to_the_outline_page_of_a_course_with_multiple_sections(step):
step.given('I have a course with multiple sections')
step.given('I navigate to the course outline page')
@step(u'I add a section')
def i_add_a_section(step):
add_section()
@step(u'I press the section delete icon')
def i_press_the_section_delete_icon(step):
delete_locator = 'section .outline-section > .section-header a.delete-button'
world.css_click(delete_locator)
@step(u'I will confirm all alerts')
def i_confirm_all_alerts(step):
confirm_locator = '.prompt .nav-actions a.action-primary'
world.css_click(confirm_locator)
@step(u'I see the "([^"]*) All Sections" link$')
def i_see_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_true(world.css_visible(span_locator))
@step(u'I do not see the "([^"]*) All Sections" link$')
def i_do_not_see_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_false(world.css_visible(span_locator))
@step(u'I click the "([^"]*) All Sections" link$')
def i_click_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_true(world.browser.is_element_present_by_css(span_locator))
world.css_click(span_locator)
@step(u'I ([^"]*) the first section$')
def i_collapse_expand_a_section(step, text):
if text == "collapse":
locator = 'section .outline-section .ui-toggle-expansion'
elif text == "expand":
locator = 'section .outline-section .ui-toggle-expansion'
world.css_click(locator)
@step(u'all sections are ([^"]*)$')
def all_sections_are_collapsed_or_expanded(step, text):
subsection_locator = 'div.subsection-list'
subsections = world.css_find(subsection_locator)
for index in range(len(subsections)):
if text == "collapsed":
assert_false(world.css_visible(subsection_locator, index=index))
elif text == "expanded":
assert_true(world.css_visible(subsection_locator, index=index))
@step(u"I change an assignment's grading status")
def change_grading_status(step):
world.css_find('a.menu-toggle').click()
world.css_find('.menu li').first.click()
| agpl-3.0 |
saurvs/servo | tests/wpt/css-tests/css-fonts-3_dev/html/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
hbrunn/OCB | addons/account/wizard/account_invoice_refund.py | 10 | 13046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
class account_invoice_refund(osv.osv_memory):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
_columns = {
'date': fields.date('Date', help='This date will be used as the invoice date for credit note and period will be chosen accordingly!'),
'period': fields.many2one('account.period', 'Force period'),
'journal_id': fields.many2one('account.journal', 'Refund Journal', help='You can select here the journal to use for the credit note that will be created. If you leave that field empty, it will use the same journal as the current invoice.'),
'description': fields.char('Reason', required=True),
'filter_refund': fields.selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'),('modify', 'Modify: create refund, reconcile and create a new draft invoice')], "Refund Method", required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled'),
}
def _get_journal(self, cr, uid, context=None):
obj_journal = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
if context is None:
context = {}
inv_type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
type = (inv_type == 'out_invoice') and 'sale_refund' or \
(inv_type == 'out_refund') and 'sale' or \
(inv_type == 'in_invoice') and 'purchase_refund' or \
(inv_type == 'in_refund') and 'purchase'
journal = obj_journal.search(cr, uid, [('type', '=', type), ('company_id','=',company_id)], limit=1, context=context)
return journal and journal[0] or False
def _get_reason(self, cr, uid, context=None):
active_id = context and context.get('active_id', False)
if active_id:
inv = self.pool.get('account.invoice').browse(cr, uid, active_id, context=context)
return inv.name
else:
return ''
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
'journal_id': _get_journal,
'filter_refund': 'refund',
'description': _get_reason,
}
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
# remove the entry with key 'form_view_ref', otherwise fields_view_get crashes
context = dict(context or {})
context.pop('form_view_ref', None)
res = super(account_invoice_refund,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
journal_type = (type == 'out_invoice') and 'sale_refund' or \
(type == 'out_refund') and 'sale' or \
(type == 'in_invoice') and 'purchase_refund' or \
(type == 'in_refund') and 'purchase'
for field in res['fields']:
if field == 'journal_id':
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', journal_type), ('company_id','child_of',[company_id])], context=context)
res['fields'][field]['selection'] = journal_select
return res
def compute_refund(self, cr, uid, ids, mode='refund', context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the account invoice refund’s ID or list of IDs
"""
inv_obj = self.pool.get('account.invoice')
reconcile_obj = self.pool.get('account.move.reconcile')
account_m_line_obj = self.pool.get('account.move.line')
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
inv_tax_obj = self.pool.get('account.invoice.tax')
inv_line_obj = self.pool.get('account.invoice.line')
res_users_obj = self.pool.get('res.users')
if context is None:
context = {}
for form in self.browse(cr, uid, ids, context=context):
created_inv = []
date = False
period = False
description = False
company = res_users_obj.browse(cr, uid, uid, context=context).company_id
journal_id = form.journal_id.id
for inv in inv_obj.browse(cr, uid, context.get('active_ids'), context=context):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise osv.except_osv(_('Error!'), _('Cannot %s draft/proforma/cancel invoice.') % (mode))
if inv.reconciled and mode in ('cancel', 'modify'):
raise osv.except_osv(_('Error!'), _('Cannot %s invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.') % (mode))
if form.period.id:
period = form.period.id
else:
period = inv.period_id and inv.period_id.id or False
if not journal_id:
journal_id = inv.journal_id.id
if form.date:
date = form.date
if not form.period.id:
cr.execute("select name from ir_model_fields \
where model = 'account.period' \
and name = 'company_id'")
result_query = cr.fetchone()
if result_query:
cr.execute("""select p.id from account_fiscalyear y, account_period p where y.id=p.fiscalyear_id \
and date(%s) between p.date_start AND p.date_stop and y.company_id = %s limit 1""", (date, company.id,))
else:
cr.execute("""SELECT id
from account_period where date(%s)
between date_start AND date_stop \
limit 1 """, (date,))
res = cr.fetchone()
if res:
period = res[0]
else:
date = inv.date_invoice
if form.description:
description = form.description
else:
description = inv.name
if not period:
raise osv.except_osv(_('Insufficient Data!'), \
_('No period found on the invoice.'))
refund_id = inv_obj.refund(cr, uid, [inv.id], date, period, description, journal_id, context=context)
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
inv_obj.write(cr, uid, [refund.id], {'date_due': date,
'check_total': inv.check_total})
inv_obj.button_compute(cr, uid, refund_id)
created_inv.append(refund_id[0])
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_id
to_reconcile_ids = {}
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconcile_id:
line.reconcile_id.unlink()
refund.signal_workflow('invoice_open')
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
for tmpline in refund.move_id.line_id:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_ids[tmpline.account_id.id].append(tmpline.id)
for account in to_reconcile_ids:
account_m_line_obj.reconcile(cr, uid, to_reconcile_ids[account],
writeoff_period_id=period,
writeoff_journal_id = inv.journal_id.id,
writeoff_acc_id=inv.account_id.id
)
if mode == 'modify':
invoice = inv_obj.read(cr, uid, [inv.id],
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term', 'account_id',
'currency_id', 'invoice_line', 'tax_line',
'journal_id', 'period_id', 'fiscal_position'], context=context)
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(cr, uid, invoice['invoice_line'], context=context)
invoice_lines = inv_obj._refund_cleanup_lines(cr, uid, invoice_lines, context=context)
tax_lines = inv_tax_obj.browse(cr, uid, invoice['tax_line'], context=context)
tax_lines = inv_obj._refund_cleanup_lines(cr, uid, tax_lines, context=context)
invoice.update({
'type': inv.type,
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'period_id': period,
'name': description
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term', 'journal_id', 'fiscal_position'):
invoice[field] = invoice[field] and invoice[field][0]
inv_id = inv_obj.create(cr, uid, invoice, {})
if inv.payment_term.id:
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv.payment_term.id, date)
if 'value' in data and data['value']:
inv_obj.write(cr, uid, [inv_id], data['value'])
created_inv.append(inv_id)
xml_id = (inv.type == 'out_refund') and 'action_invoice_tree1' or \
(inv.type == 'in_refund') and 'action_invoice_tree2' or \
(inv.type == 'out_invoice') and 'action_invoice_tree3' or \
(inv.type == 'in_invoice') and 'action_invoice_tree4'
result = mod_obj.get_object_reference(cr, uid, 'account', xml_id)
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
def invoice_refund(self, cr, uid, ids, context=None):
data_refund = self.read(cr, uid, ids, ['filter_refund'],context=context)[0]['filter_refund']
return self.compute_refund(cr, uid, ids, data_refund, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ARM-software/lisa | external/workload-automation/doc/source/conf.py | 4 | 11132 | # -*- coding: utf-8 -*-
# Copyright 2018 ARM Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WA3 documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..'))
sys.path.insert(0, os.path.join(this_dir, '../..'))
import wa
from build_plugin_docs import (generate_plugin_documentation,
generate_run_config_documentation,
generate_meta_config_documentation,
generate_target_documentation)
from build_instrument_method_map import generate_instrument_method_map
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['static/templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wa'
copyright = u'2018, ARM Limited'
author = u'ARM Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wa.framework.version.get_wa_version()
# The full version, including alpha/beta/rc tags.
release = wa.framework.version.get_wa_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['../build', 'developer_information',
'user_information', 'run_config']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'WA-logo-white.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'wadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'wa.tex', u'wa Documentation',
u'Arm Limited', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wa', u'wa Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'wa', u'wa Documentation',
author, 'wa', 'A framework for automating workload execution on mobile devices.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
module_dir = os.path.join('..', '..', 'wa')
excluded_extensions = [os.path.join(module_dir, 'framework'),
os.path.join(module_dir, 'tests')]
os.chdir(os.path.dirname(__file__))
generate_plugin_documentation(module_dir, 'plugins', excluded_extensions)
generate_target_documentation('plugins')
generate_run_config_documentation('run_config')
generate_meta_config_documentation('run_config')
generate_instrument_method_map(os.path.join('developer_information', 'developer_guide',
'instrument_method_map.rst'))
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
| apache-2.0 |
thebjorn/pymagic | magicmethods.py | 1 | 4244 | # -*- coding: utf-8 -*-
"""
From http://www.wellho.net/mouth/3002_a-list-of-special-method-and-attribute-names-in-python.html
and also http://www.rafekettler.com/magicmethods.html
__new__ True constructor - usually wraps __init__
__init__ Object constructor
__call__
__getattr__ Handling object attributes [example] and [calling example]
__setattr__
__delattr__
__getattribute__
__getitem__ subscripting with [..]
__setitem__
__delitem__
__del__ Destructor / wrapper around del
__repr__ Convert to Python source
__str__ Convert to printable string
__cmp__ Compare two objects
__lt__ Less Than
__gt__ Greater Than
__eq__ Equal to
__ne__ Not Equal to
__le__ Less Than or Equal
__ge__ Greater Than or Equal
__hash__ Calculate an (integer) hash value
__nonzero__ Is it nonzero
__unicode__ Convert to Unicode String
__get__ Descriptor protocol - attribute read through a descriptor
__set__ Descriptor protocol - attribute write through a descriptor
__delete__ Descriptor protocol - attribute delete through a descriptor
__instancecheck__ isinstance builtin function
__subclasscheck__ issubclass builtin function
__getslice__ Working with slices .. [..:..][example]
__setslice__
__delslice__
__len__ len building function
__add__ + [example]
__mul__ *
__contains__
__coerce__
__iter__
__reversed__
__sub__ -
__div__ /
__floordiv__ //
__mod__ %
__divmod__
__pow__ **
__lshift__ <<
__rshift__ >>
__and__ &
__xor__ ^
__or__ |
__truediv__ __future__ > /
__radd__ "r" methods operate on object to right
__rmul__
__rsub__
__rdiv__
__rtruediv__
__rfloordiv__
__rmod__
__rdivmod__
__rpow__
__rlshift__
__rrshift__
__rand__
__rxor__
__ror__
__iadd__ +=
__imul__ *=
__isub__ -=
__idiv__ /=
__itruediv__ __future__ > /=
__ifloordiv__ //=
__imod__ %=
__ipow__ **=
__ilshift__ <<=
__irshift__ >>=
__iand__ &=
__ixor__ ^=
__ior__ |=
__neg__ monadic -
__pos__ monadic +
__abs__ abs built in function
__invert__ monadic ~
__complex__ complex built in function
__int__ int built in function
__long__ long built in function
__float__ float built in function
__oct__ oct built in function
__hex__ hex built in function
__index__
__enter__
__exit__
"""
lifecycle = [
'__new__',
'__init__',
'__del__',
]
comparison = [
'__cmp__',
'__eq__',
'__ne__',
'__lt__',
'__gt__',
'__le__',
'__ge__',
]
unary = [
'__pos__',
'__neg__',
'__abs__',
'__invert__',
'__round__',
'__floor__',
'__ceil__',
'__trunc__',
]
arithmetic = [
'__add__',
'__sub__',
'__mul__',
'__floordiv__',
'__div__',
'__truediv__',
'__mod__',
'__divmod__',
'__pow__',
'__lshift__',
'__rshift__',
'__and__',
'__or__',
'__xor__',
]
rarithmetic = [
'__radd__',
'__rsub__',
'__rmul__',
'__rfloordiv__',
'__rdiv__',
'__rtruediv__',
'__rmod__',
'__rdivmod__',
'__rpow__',
'__rlshift__',
'__rrshift__',
'__rand__',
'__ror__',
'__rxor__',
]
iassign = [
'__iadd__',
'__isub__',
'__imul__',
'__ifloordiv__',
'__idiv__',
'__itruediv__',
'__imod__',
'__ipow__',
'__ilshift__',
'__irshift__',
'__iand__',
'__ior__',
'__ixor__',
]
typeconv = [
'__int__',
'__long__',
'__float__',
'__complex__',
'__oct__',
'__hex__',
'__index__',
'__trunc__',
'__coerce__',
]
representation = [
'__str__',
'__repr__',
'__unicode__',
'__bytes__',
'__format__',
'__hash__',
'__nonzero__',
'__bool__',
'__dir__',
'__sizeof__',
]
attributes = [
'__getattr__',
'__setattr__',
'__delattr__',
'__getattribute__',
]
containers = [
'__len__',
'__getitem__',
'__setitem__',
'__delitem__',
'__iter__',
'__reversed__',
'__contains__',
'__missing__',
]
reflection = [
'__instancecheck__',
'__subclasscheck__',
]
callables = [
'__call__',
]
contextmanagers = [
'__enter__',
'__exit__',
]
descriptors = [
'__get__',
'__set__',
'__delete__',
]
copying = [
'__copy__',
'__deepcopy__',
]
pickling = [
'__getinitargs__',
'__getnewargs__',
'__getstate__',
'__setstate__',
'__reduce__',
'__reduce_ex__',
]
all = (lifecycle + comparison + unary + arithmetic + rarithmetic + iassign +
typeconv + representation + attributes + containers + reflection +
callables + contextmanagers + descriptors + copying + pickling)
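# Example use of the groupings above (an illustrative sketch, not part of the
# original module): report which magic methods a class actually defines.
def _implemented(cls, names=all):
    """Return the subset of `names` that `cls` (or a base class) provides."""
    return [name for name in names if hasattr(cls, name)]

# >>> '__iadd__' in _implemented(list)
# True
# >>> '__enter__' in _implemented(dict)
# False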
| lgpl-2.1 |
JeanKossaifi/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
StelixROM/android_kernel_google_msm | scripts/gcc-wrapper.py | 473 | 3422 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
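# Illustrative, made-up example of a compiler line the regex above is meant to
# match; for such a line m.group(2) would be "alignment.c:327", which is then
# checked against allowed_warnings:
#   drivers/foo/alignment.c:327:12: warning: cast increases required alignment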
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
ScalaInc/exp-python2-sdk | tests/enterpriceSDKTestScript/RssReader/expmessage.py | 1 | 1524 | import exp_sdk
import scala5
import scalalib
from scalalib import sharedvars
scalaVars = sharedvars()
scala5.ScalaPlayer.Log('Starting EXP message listen')
try:
# authentication
exp = exp_sdk.start(uuid=scalaVars.uuid, api_key=scalaVars.api_key, host=scalaVars.host)
# Wait for a connection.
while not exp.is_connected:
scalalib.sleep(1000)
# setup channel
channel = exp.get_channel('scala-test-channel', consumer=True)
listener = channel.listen('my-message', max_age=30)
# listen to message
while True:
broadcast = listener.wait()
if broadcast:
scala5.ScalaPlayer.Log('Message received')
scalaVars.EXPmessage = broadcast.payload
scala5.ScalaPlayer.Log('Received message: ' + broadcast.payload)
broadcast.respond('Message received thank you!')
scalalib.sleep(1000)
exp.stop()
except (exp_sdk.ExpError, exp_sdk.UnexpectedError):
scala5.ScalaPlayer.LogExternalError(1000, 'ExpError', 'Error opening channel to EXP')
except exp_sdk.RuntimeError:
scala5.ScalaPlayer.LogExternalError(1000, 'RuntimeError', 'Please check start options of EXP SDK')
except exp_sdk.AuthenticationError:
scala5.ScalaPlayer.LogExternalError(1000, 'AuthenticationError',
'Unable to connect to EXP, please check credentials')
except exp_sdk.ApiError:
scala5.ScalaPlayer.LogExternalError(1000, 'ApiError', exp_sdk.ApiError.message)
| mit |
ShownX/incubator-mxnet | tests/python/unittest/test_optimizer.py | 1 | 26312 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
import mxnet.lr_scheduler as lr_scheduler
from nose.tools import raises
import math
from mxnet.test_utils import *
def test_learning_rate():
o1 = mx.optimizer.Optimizer(learning_rate=0.01)
o1.set_learning_rate(0.2)
assert o1.learning_rate == 0.2
lr_s = lr_scheduler.FactorScheduler(step=1)
o2 = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)
assert o2.learning_rate == 0.3
o2.lr_scheduler.base_lr = 0.4
assert o2.learning_rate == 0.4
@raises(UserWarning)
def test_learning_rate_expect_user_warning():
lr_s = lr_scheduler.FactorScheduler(step=1)
o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)
o.set_learning_rate(0.5)
def test_lr_wd_mult():
data = mx.sym.Variable('data')
bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)
fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)
mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())
mod.bind(data_shapes=[('data', (5,10))])
mod.init_params(initializer=mx.init.Uniform(1.0))
mod.init_optimizer(optimizer_params={'learning_rate': 1.0})
args1, _ = mod.get_params()
args1 = {k: v.asnumpy() for k, v in args1.items()}
mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)
mod.backward(mod.get_outputs())
mod.update()
args2, _ = mod.get_params()
args2 = {k: v.asnumpy() for k, v in args2.items()}
assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}
assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}
assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)
assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)
assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)
def compare_ndarray_tuple(t1, t2, rtol=None, atol=None):
if t1 is not None and t2 is not None:
if isinstance(t1, tuple):
for s1, s2 in zip(t1, t2):
compare_ndarray_tuple(s1, s2, rtol, atol)
else:
assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)
def compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default'):
if w_stype == 'default':
w2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
w1 = w2.copyto(default_context())
elif w_stype == 'row_sparse' or w_stype == 'csr':
w2 = rand_ndarray(shape, w_stype, density=1, dtype=dtype)
w1 = w2.copyto(default_context()).tostype('default')
else:
raise Exception("type not supported yet")
if g_stype == 'default':
g2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
g1 = g2.copyto(default_context())
elif g_stype == 'row_sparse' or g_stype == 'csr':
g2 = rand_ndarray(shape, g_stype, dtype=dtype)
g1 = g2.copyto(default_context()).tostype('default')
else:
raise Exception("type not supported yet")
state1 = opt1.create_state_multi_precision(0, w1)
state2 = opt2.create_state_multi_precision(0, w2)
compare_ndarray_tuple(state1, state2)
opt1.update_multi_precision(0, w1, g1, state1)
opt2.update_multi_precision(0, w2, g2, state2)
compare_ndarray_tuple(state1, state2, rtol=1e-4, atol=1e-5)
assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=1e-4, atol=1e-5)
# SGD
class PySGD(mx.optimizer.Optimizer):
"""python reference implemenation of sgd"""
def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):
super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.multi_precision = multi_precision
def create_state(self, index, weight):
"""Create additional optimizer state: momentum
Parameters
----------
weight : NDArray
The weight data
"""
momentum = None
weight_master_copy = None
do_multi_precision = self.multi_precision and weight.dtype == np.float16
if do_multi_precision:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)
return (momentum, weight_master_copy)
else:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def create_state_multi_precision(self, index, weight):
return self.create_state(index, weight)
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
use_multi_precision = isinstance(state, list) or isinstance(state, tuple)
if not use_multi_precision:
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight[:] = ((1 - lr*wd)*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad
else:
mom = state
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad
weight += mom
else:
grad32 = array(grad, ctx=grad.context, dtype=np.float32)
mom = state[0]
weight32 = state[1]
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight32[:] = ((1 - lr*wd)*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32
else:
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight32 += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32
weight32 += mom
tmp = weight32.astype(weight.dtype)
tmp.copyto(weight)
def update_multi_precision(self, index, weight, grad, state):
self.update(index, weight, grad, state)
def test_sgd():
mx.random.seed(0)
opt1 = PySGD
opt2 = mx.optimizer.SGD
shape = (3, 4, 5)
mom_options = [{}, {'momentum': 0.9}]
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32, np.float64]:
for mom_option in mom_options:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(mom_option)
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# test operator fallback on cpu
if (default_context() == mx.cpu()):
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
g_stype='row_sparse')
if dtype != np.float16:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],
dtype, w_stype='csr', g_stype='csr')
class PySparseSGD(mx.optimizer.Optimizer):
"""python reference implemenation of sgd"""
def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):
super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
def create_state(self, index, weight):
"""Create additional optimizer state: momentum
Parameters
----------
weight : NDArray
The weight data
"""
if self.momentum == 0.0:
return None
else:
return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
num_rows = weight.shape[0]
if self.momentum == 0.0:
# Update on a per row basis, skip all-zero rows
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
continue
if self.clip_gradient is not None:
weight[row] = ((1 - lr*wd)*weight[row] -
lr*mx.nd.clip(grad[row]*self.rescale_grad,
-self.clip_gradient, self.clip_gradient))
else:
weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]
else:
mom = state
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
continue
if self.clip_gradient is not None:
mom[row] = (self.momentum*mom[row] - lr*wd*weight[row] -
lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight[row] += mom[row]
else:
mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]
weight[row] += mom[row]
def test_sparse_sgd():
mx.random.seed(0)
opt1 = PySparseSGD
opt2 = mx.optimizer.SGD
shape = (3, 4, 5)
mom_options = [{}, {'momentum': 0.9}]
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float32]:
for mom_option in mom_options:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(mom_option)
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
w_stype='row_sparse', g_stype='row_sparse')
# ADAM
class PyAdam(mx.optimizer.Optimizer):
"""python reference implemenation of adam"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
decay_factor=(1 - 1e-8), sparse_update=False, **kwargs):
super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.decay_factor = decay_factor
self.sparse_update = sparse_update
def create_state(self, index, weight):
"""Create additional optimizer state: mean, variance
Parameters
----------
weight : NDArray
The weight data
"""
return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
self._update_count(index)
t = self._index_update_count[index]
mean, variance = state
wd = self._get_wd(index)
num_rows = weight.shape[0]
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
for row in range(num_rows):
# check row slices of all zeros
all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
# skip zeros during sparse update
if all_zeros and self.sparse_update:
continue
grad[row] = grad[row] * self.rescale_grad + wd * weight[row]
# clip gradients
if self.clip_gradient is not None:
mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
# update mean
mean[row] *= self.beta1
mean[row] += grad[row] * (1. - self.beta1)
# update variance
variance[row] *= self.beta2
variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])
# update weight
weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)
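# For reference, a sketch of the per-row Adam update implemented above, where
# g denotes the rescaled, weight-decayed and optionally clipped gradient
# (notation is ours, not taken from the original paper verbatim):
#
#   lr_t     = lr * sqrt(1 - beta2**t) / (1 - beta1**t)
#   mean     = beta1 * mean + (1 - beta1) * g
#   variance = beta2 * variance + (1 - beta2) * g**2
#   weight   = weight - lr_t * mean / (sqrt(variance) + epsilon)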
def test_adam():
mx.random.seed(0)
opt1 = PyAdam
opt2 = mx.optimizer.Adam
shape = (3, 4, 5)
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32, np.float64]:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
if (default_context() == mx.cpu()):
compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,
dtype, w_stype='row_sparse', g_stype='row_sparse')
# RMSProp
class PyRMSProp(mx.optimizer.Optimizer):
"""RMSProp optimizer of Tieleman & Hinton, 2012,
For centered=False, the code follows the version in
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012
For centered=True, the code follows the version in
http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
Parameters
----------
learning_rate : float, optional
Step size.
Default value is set to 0.001.
gamma1: float, optional
decay factor of moving average for gradient, gradient^2.
Default value is set to 0.9.
gamma2: float, optional
"momentum" factor.
        Default value is set to 0.9.
Only used if centered=True
epsilon : float, optional
Default value is set to 1e-8.
centered : boolean, optional
Use Graves or Tielemans & Hintons version of RMSProp
wd : float, optional
        L2 regularization coefficient added to all the weights
rescale_grad : float, optional
rescaling factor of gradient.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
clip_weights : float, optional
clip weights in range [-clip_weights, clip_weights]
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.centered = centered
self.gamma1 = gamma1
self.gamma2 = gamma2
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
"""Create additional optimizer state.
For centered=False: n
For centered=True: n, g, delta
Parameters
----------
weight : NDArray
The weight data
"""
if self.centered:
return (mx.nd.zeros(weight.shape, weight.context), # n
mx.nd.zeros(weight.shape, weight.context), # g
mx.nd.zeros(weight.shape, weight.context)) # delta
else:
return (mx.nd.zeros(weight.shape, weight.context), ) # n
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad + wd * weight
if not self.centered:
(n, ) = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))
else:
n, g, delta = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
g[:] = (1 - self.gamma1) * grad + self.gamma1 * g
delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))
weight[:] += delta
if self.clip_weights:
mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)
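# For reference, a sketch of the update rules implemented by PyRMSProp above,
# where g denotes the rescaled, weight-decayed and optionally clipped gradient
# (notation is ours, not quoted from the cited papers):
#
#   non-centered (Tieleman & Hinton):
#       n      = (1 - gamma1) * g**2 + gamma1 * n
#       weight = weight - lr * g / sqrt(n + epsilon)
#
#   centered (Graves):
#       n      = (1 - gamma1) * g**2 + gamma1 * n
#       g_avg  = (1 - gamma1) * g    + gamma1 * g_avg
#       delta  = gamma2 * delta - lr * g / sqrt(n - g_avg**2 + epsilon)
#       weight = weight + delta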
def test_rms():
mx.random.seed(0)
opt1 = PyRMSProp
opt2 = mx.optimizer.RMSProp
shape = (3, 4, 5)
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
cw_options = [{}, {'clip_weights': 0.01}]
center_options = [{}, {'centered': False}, {'centered': True}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32]:
for cw_option in cw_options:
for cg_option in cg_options:
for center_option in center_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(cw_option)
kwarg.update(cg_option)
kwarg.update(center_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
if (default_context() == mx.cpu()):
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')
class PyFtrl(mx.optimizer.Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
eta :
.. math::
           \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^t g_{s,i}^2}}
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, sparse_update=False, **kwargs):
super(PyFtrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
self.sparse_update = sparse_update
def create_state(self, index, weight):
return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # dn
mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # n
def update(self, index, weight, grad, state):
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
num_rows = weight.shape[0]
dn, n = state
for row in range(num_rows):
all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
if all_zeros and self.sparse_update:
continue
grad[row] = grad[row] * self.rescale_grad
if self.clip_gradient is not None:
mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
#update dn, n
dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
n[row] += grad[row] * grad[row]
# update weight
weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)
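# For reference, a sketch of the per-row FTRL update implemented above, where
# g denotes the rescaled and optionally clipped gradient (notation is ours):
#
#   dn     = dn + g - (sqrt(n + g**2) - sqrt(n)) * weight / lr
#   n      = n + g**2
#   weight = (sign(dn) * lamda1 - dn) / ((beta + sqrt(n)) / lr + wd)   if |dn| > lamda1
#   weight = 0                                                         otherwise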
def test_ftrl():
mx.random.seed(0)
opt1 = PyFtrl
opt2 = mx.optimizer.Ftrl
shape = (3, 4, 5)
kwargs = [{},
{'clip_gradient': 0.5},
{'clip_gradient': 0.4, 'rescale_grad': 0.14},
{'rescale_grad': 0.8},
{'clip_gradient': 0.5, 'wd': 0.07},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},
{'rescale_grad': 0.8, 'wd': 0.05},
{'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},
{'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]
for kwarg in kwargs:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)
compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,
np.float32, w_stype='row_sparse', g_stype='row_sparse')
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 |
atplanet/ansible-modules-extras | database/postgresql/postgresql_ext.py | 29 | 5803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "1.9"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext: name=postgis db=acme
'''
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
cursor.execute(query)
return True
else:
return False
def ext_create(cursor, ext):
if not ext_exists(cursor, ext):
query = 'CREATE EXTENSION "%s"' % ext
cursor.execute(query)
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
port=dict(default="5432"),
db=dict(required=True),
ext=dict(required=True, aliases=['name']),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
db = module.params["db"]
ext = module.params["ext"]
port = module.params["port"]
state = module.params["state"]
changed = False
    # To use default values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' )
try:
db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create extensions
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
try:
if module.check_mode:
if state == "absent":
changed = not ext_exists(cursor, ext)
elif state == "present":
changed = ext_exists(cursor, ext)
module.exit_json(changed=changed,ext=ext)
if state == "absent":
changed = ext_delete(cursor, ext)
elif state == "present":
changed = ext_create(cursor, ext)
except NotSupportedError, e:
module.fail_json(msg=str(e))
except Exception, e:
module.fail_json(msg="Database query failed: %s" % e)
module.exit_json(changed=changed, db=db)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
sloede/config | vim/vim/AutomaticTexPlugin_12.1/ftplugin/ATP_files/atplib/check_bracket.py | 1 | 21903 | #!/usr/bin/env python
"""
Check if a bracket '(' is closed. Return the (line, col) of an unclosed
bracket.
"""
__all__ = [ 'byte_pos', 'line_pos', 'check_bracket_frompos', 'check_bracket' ]
DEBUG = False
def byte_pos(text, line, col):
""" Return position index of (line, col)
line is line index, col is column index
    The returned byte position __includes__ all '\n's.
Text is unicode.
"""
if type(text) != list:
lines = text.splitlines(True)[:line+1]
else:
lines = text[:line+1]
b = len(''.join(lines[:line])) + len(lines[line][:col])
return b
def line_pos(text, b):
""" Return line, col position of byte b."""
beg = text[:b]
line = beg.count('\n')
lines = text.splitlines(True)
if line:
col = b-byte_pos(text, line-1, len(lines[line-1])-1)-1
else:
col = b
return (line, col)
def check_bracket_frompos(text, o_bra, c_bra, pos):
""" Check if the bracket is closed, starting counting from pos (including).
Return the idx of position where the bracket is closed or -1."""
length = len(text)
if pos >= length:
return -1
end = text[pos+1:]
count_open = 1
count_close = 0
idx = pos
if DEBUG:
print(" >> count_open = %d" % count_open)
while idx < length:
if text[idx:idx+len(o_bra)] == o_bra:
count_open +=1
if text[idx:idx+len(c_bra)] == c_bra:
count_close +=1
# if DEBUG:
# print(" >> (%d,%s) (%d,%d)" % (idx, text[idx], count_open, count_close))
if count_open == count_close:
if DEBUG:
print(" >> True; finished at (%d, %s)" % (idx, repr(text[idx])))
return idx
idx += 1
else:
if DEBUG:
print(" >> False; finished at (%d, %s)" % (idx, repr(text[idx-1])))
return -1
def check_bracket(text, line, col, bracket_dict):
""" Return position of the first opened and not closed bracket before
(line, col) [excluding]
text - text to search within
line - the line where to start (lines start count from 0)
    col - the column where to start (columns start count from 0)
    bracket_dict - dictionary of keys:values : '(' : ')'
Returns triple (line, col, ket) - line, col position where ket was opened.
"""
pos = byte_pos(text, line, col)
    beg = text[:pos+1] # don't exclude the current byte
stack = [] # Holds list of all brackets which are opened before the pos and not closed or closed after
# the function returns the first position in this stack.
found_closed = False
o_count = 0
x = pos
while x >= 0:
x -= 1
o_cond = False
c_cond = False
if not found_closed:
for (O_BRA, C_BRA) in bracket_dict.items():
if text[x:x+len(O_BRA)] == O_BRA:
o_cond = True
if DEBUG:
lpos = line_pos(text, x)
print("-- o_cond: %d, (%d, %d), %s" % (x, lpos[0], lpos[1], O_BRA))
break
elif text[x:x+len(C_BRA)] == C_BRA:
c_cond = True
break
else:
if text[x:x+len(O_BRA)] == O_BRA:
o_cond = True
for (o_bra, c_bra) in bracket_dict.items():
if text[x:x+len(c_bra)] == c_bra:
c_cond = True
break
if o_cond:
stack.append((x, O_BRA))
if DEBUG:
print("-- cbf args: (%s, %s, %d)" % (O_BRA, C_BRA, pos))
print("-- (%d, %d)" % line_pos(text, pos))
closed = check_bracket_frompos(text, O_BRA, C_BRA, pos)
if DEBUG:
print("-- closed: %s" % closed)
if closed >= 0:
pos = closed + 1
if not found_closed:
# If we found closed bracket we cannot expect that in between there are other brackets.
# ( { ( X ) - we should not close at X the opened { even if it is opened.
found_closed = True
if DEBUG:
print("-- (%d, %s)" % (x, O_BRA))
if closed == -1:
""" We can return here since we skip all the ' ( ... ) ' """
lpos =line_pos(text, stack[0][0])
if DEBUG:
pos = line_pos(text, x)
print("break at (%d,%d,%s)" % (pos[0], pos[1], text[x]))
return (lpos[0], lpos[1], stack[0][1])
elif c_cond:
# Skip till the matching o_bra.
s_x = x
count_open = 0
count_closed = 1
# If the bracket is one of \), \}, \] jump to the matching one.
if x and text[x-1] == "\\":
backslash = True
else:
backslash = False
while count_open != count_closed:
""" We should modify the text here so it simplifies. This is
might be important for check_bracket_atpos()
"""
x -= 1
t = beg[x]
if text[x:x+len(O_BRA)] == O_BRA:
if backslash :
if x-1 and text[x-1] == "\\":
count_open += 1
else:
count_open += 1
if text[x:x+len(C_BRA)] == C_BRA:
if backslash :
if x-1 and text[x-1] == "\\":
count_closed += 1
else:
count_closed += 1
if x == 0:
                    # Oops. There was no matching (.
return (-1, -1, O_BRA)
if DEBUG:
print("Skipping `%s:%s` (%d:%s,%d,%s)." % (O_BRA, C_BRA, s_x, text[s_x], x, text[x]))
return (-1, -1, '')
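# Usage sketch, mirroring test_2 of the self-test below: for a text whose
# line 1 opens a '(' that is never closed before position (line 2, col 0),
#     check_bracket(text, 2, 0, bracket_dict)
# returns (1, 0, '(') -- the line, column and kind of bracket left open --
# while a fully balanced text yields (-1, -1, '').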
if __name__ == "__main__":
"""
==========
Test suite
==========
"""
test_1=u"""(
(
test 1 [line-inedx: 3]
)
)
"""
# before implementing the stack in check_bracket:
test_2=u"""(<-considered as open
(<- considered as closed
test 2 unclosed bracket
)
"""
# after adding the stack in check_bracket:
"""I want the test two to return."""
test_2a=u"""(<-considered as closed
(<- considered as open
test 2 unclosed bracket
)
"""
test_3=u"""(
( ) <- skip
(
test 3 unclosed bracket with skipping
)
"""
test_4=u"""(
( ( ( ) ) ) <- skip
(
test 3 unclosed bracket with skipping
)
)
"""
test_5=u"""(
[
( { }
HERE we should close the round bracket. Now the algorithm returns the right
bracket but it stops at the square bracket two lines above.
should return 0, 0
)
"""
test_6=u"""(
( [
LINE 2 should return -1, -1
]"""
test_7=u"""(
(
)
(
(
)
)
X opened at 0
(
(
(
)
)
)
"""
test_8=u"""
(--------)
(
X
{----}
)
-------
"""
test_9=u"""(
\( ( \)
X
)"""
real_test_1="""Now let us give a construction of a generalised quotient of
\(\mathcal{U}(\mathfrak{g})\). Let \(\pi:\mathfrak{g}\eir\mathfrak{h}\) be
a \(\k\)-linear quotient. Let \(\mathcal{U}(\mathfrak{h})\) be the quotient of
the free \(\mathcal{U}(\mathfrak{g})\)-module
\(\mathsf{F}(\k1\oplus\mathfrak{h})\) on \(\k1\oplus\mathfrak{h}\) by the
following relation:
\[1\cdot X = \pi(X)\]
for \(X\in\mathfrak{g}\). There is a right
\(\mathcal{U}(\mathfrak{g})\)-module homomorphism from
\(\mathcal{U}(\mathfrak{g})\) to \(\mathcal{U}(\mathfrak{n})\):
\[\mathcal{U}(\pi):\mathcal{U}(\mathfrak{g})\sir\mathcal{U}(\mathfrak{n})\]
which is uniquely determined by \(\mathcal{U}(\pi)(X)=\pi(X)\) for
\(X\in\mathfrak{g}\), where \(\pi(X)\in\mathfrak{h}\) is treated as an element
of \(\mathcal{U}(\mathfrak{h})\) and \(\mathcal{U}(\pi)(1)=1\). The map
\(\mathcal{U}(\pi)\) is well defined by the above Poincar\'{e}--Birkhoff--Witt
Theorem. Note that there is the following relation satisfied in
\(\mathcal{U}(\mathfrak{g})\):""".decode('utf-8')
real_test_2=r"""Van~Oystaeyen and Zhang introduce a remarkable construction of an
\emph{associated Hopf algebra} to an $H$-extension $A/A^{co\,H}$, where $A$ as
well as $H$ are supposed to be commutative
(see~\cite[Sec.~3]{fo-yz:gal-cor-hopf-galois}, for noncommutative
generalisation see:~\cite{ps:hopf-bigalois,ps:gal-cor-hopf-bigal}). We will
denote this Hopf algebra by $L(H,A)$.
% It satisfies the following two conditions: \begin{enumerate} \item[(i)]
% $A/A^{co\,H}$ becomes a \emph{biGalois extension}, i.e. a left
% $L(H,A)$-comodule algebra and a right $H$-comodule algebra such that both
% coactions commute and $A/A^{co\,H}$ is both left $L(H,A)$-Galois and right
% $H$-Galois extension, \item[(ii)] if $H$ is \emph{cocommutative} then
% \(L(H,A)\simeq A^{co\,H}\otimes H\) (the proof in the commutative
% case~\cite[Cor.~3.4]{fo-yz:gal-cor-hopf-galois} works also in the
% noncommutative case). \end{enumerate}
\citet[Prop.~3.2]{ps:gal-cor-hopf-bigal} generalises the van Oystaeyen and
Zhang correspondence (see also~\cite[Thm~6.4]{ps:hopf-bigalois}) to Galois
connection between generalised quotients of the associated Hopf algebra
\(L(H,A)\) (i.e. quotients by right ideal coideals) and subextensions of
a faithfully flat \(H\)-Hopf Galois extension of the base ring, dropping
commutativity of \(A\). In this work we construct a Galois correspondence
without the assumption that the coinvariants subalgebra is commutative and we
also \( \) drop the Hopf--Galois assumption (Theorem~\ref{thm:existence}). Let us
also note that we work over a commutative base ring rather than a field.
Instead of Hopf theoretic approach of van Oystaeyen, Zhang and Schauenburg we
propose to look from the lattice theoretic perspective. Using an existence
theorem for Galois connections we show that if the comodule algebra \(A\) is
flat over \(R\) and the functor \(A\otimes_R-\) preserves infinite
intersections then there exists a Galois correspondence between subalgebras of
\(A\) and generalised quotients of the Hopf algebra \(H\). It turns out that
such modules are exactly the Mittag--Leffler modules
(Corollary~\ref{cor:mittag-leffler}). We consider modules with intersection
property in Section~\ref{sec:modules_with_int_property}, where we also give
examples of flat and faithfully flat modules which fail to have it. Then we
discuss Galois closedness of generalised quotients and subalgebras. We show
that if a generalised quotient \(Q\) is such that \(A/A^{co\,Q}\) is
\(Q\)-Galois then it is necessarily closed under the assumption that the
canonical map of \(A/A^{co\,H}\) is onto
(Corollary~\ref{cor:Q-Galois_closed}). Later we prove that this is also
a necessary condition for Galois closedness if \(A=H\) or, more generally, if
\(A/A^{co\,H}\) is a crossed product, \(H\) is flat and \(A^{co\,H}\) is
a flat Mittag--Leffler \(R\)-module (Theorem~\ref{thm:cleft-case}). We also
consider the dual case: of \(H\)-module coalgebras, which later gives us
a simple proof of bijective correspondence between generalised quotients and
left ideal subalgebras of~\(H\) if it is finite dimensional
(Theorem~\ref{thm:newTakeuchi}). This Takeuchi correspondence, dropping the
assumptions of faithfully (co)flatness
of~\cite[Thm.~3.10]{ps:gal-cor-hopf-bigal}, was proved
by~\cite{ss:projectivity-over-comodule-algebras}, who showed that a finite
dimensional Hopf algebra is free over any its left coideal subalgebra. Our
proof avoids using this result. We also characterise closed elements of this
Galois correspondence in general case (Theorem~\ref{thm:closed-of-qquot}). As
we already mentioned, we show that a generalised quotient \(Q\) is closed if
and only if \(H/H^{co\,Q}\) is a \(Q\)-Galois extension. Furthermore, we show
that a left coideal subalgebra~\(K\) is closed if and only if \(H\sir H/K^+H\)
is a \(K\)-Galois coextension (see Definition~\ref{defi:coGalois}). This gives
an answer to the question when the bijective correspondence between
generalised quotients over which~\(H\) is faithfully coflat and coideal
subalgebra over which~\(H\) is faithfully flat holds without (co)flatness
assumptions. In the last section we extend the characterisation of closed
subalgebras and closed generalised quotients to crossed products.
( X
\section{Preliminaries}\label{subsec:basics}"""
real_test_3=r"""Van~Oystaeyen and Zhang introduce a remarkable construction of an
\emph{associated Hopf algebra} to an $H$-extension $A/A^{co\,H}$, where $A$ as
well as $H$ are supposed to be commutative
(see~\cite[Sec.~3]{fo-yz:gal-cor-hopf-galois}, for noncommutative
generalisation see:~\cite{ps:hopf-bigalois,ps:gal-cor-hopf-bigal}). We will
denote this Hopf algebra by $L(H,A)$.
% It satisfies the following two conditions: \begin{enumerate} \item[(i)]
% $A/A^{co\,H}$ becomes a \emph{biGalois extension}, i.e. a left
% $L(H,A)$-comodule algebra and a right $H$-comodule algebra such that both
% coactions commute and $A/A^{co\,H}$ is both left $L(H,A)$-Galois and right
% $H$-Galois extension, \item[(ii)] if $H$ is \emph{cocommutative} then
% \(L(H,A)\simeq A^{co\,H}\otimes H\) (the proof in the commutative
% case~\cite[Cor.~3.4]{fo-yz:gal-cor-hopf-galois} works also in the
% noncommutative case). \end{enumerate}
\citet[Prop.~3.2]{ps:gal-cor-hopf-bigal} generalises the van Oystaeyen and
Zhang correspondence (see also~\cite[Thm~6.4]{ps:hopf-bigalois}) to Galois
connection between generalised quotients of the associated Hopf algebra
\(L(H,A)\) (i.e. quotients by right ideal coideals) and subextensions of
a faithfully flat \(H\)-Hopf Galois extension of the base ring, dropping
commutativity of \(A\). In this work we construct a Galois correspondence
without the assumption that the coinvariants subalgebra is commutative and we
also \( \) drop the Hopf--Galois assumption (Theorem~\ref{thm:existence}). Let us
also note that we work over a commutative base ring rather than a field.
Instead of Hopf theoretic approach of van Oystaeyen, Zhang and Schauenburg we
propose to look from the lattice theoretic perspective. Using an existence
theorem for Galois connections we show that if the comodule algebra \(A\) is
flat over \(R\) and the functor \(A\otimes_R-\) preserves infinite
intersections then there exists a Galois correspondence between subalgebras of
\(A\) and generalised quotients of the Hopf algebra \(H\). It turns out that
such modules are exactly the Mittag--Leffler modules
(Corollary~\ref{cor:mittag-leffler}. We consider modules with intersection
property in Section~\ref{sec:modules_with_int_property}, where we also give
examples of flat and faithfully flat modules which fail to have it. Then we
discuss Galois closedness of generalised quotients and subalgebras. We show
that if a generalised quotient \(Q\) is such that \(A/A^{co\,Q}\) is
\(Q\)-Galois then it is necessarily closed under the assumption that the
canonical map of \(A/A^{co\,H}\) is onto
(Corollary~\ref{cor:Q-Galois_closed}). Later we prove that this is also
a necessary condition for Galois closedness if \(A=H\) or, more generally, if
\(A/A^{co\,H}\) is a crossed product, \(H\) is flat and \(A^{co\,H}\) is
a flat Mittag--Leffler \(R\)-module (Theorem~\ref{thm:cleft-case}). We also
consider the dual case: of \(H\)-module coalgebras, which later gives us
a simple proof of bijective correspondence between generalised quotients and
left ideal subalgebras of~\(H\) if it is finite dimensional
(Theorem~\ref{thm:newTakeuchi}). This Takeuchi correspondence, dropping the
assumptions of faithfully (co)flatness
of~\cite[Thm.~3.10]{ps:gal-cor-hopf-bigal}, was proved
by~\cite{ss:projectivity-over-comodule-algebras}, who showed that a finite
dimensional Hopf algebra is free over any its left coideal subalgebra. Our
proof avoids using this result. We also characterise closed elements of this
Galois correspondence in general case (Theorem~\ref{thm:closed-of-qquot}). As
we already mentioned, we show that a generalised quotient \(Q\) is closed if
and only if \(H/H^{co\,Q}\) is a \(Q\)-Galois extension. Furthermore, we show
that a left coideal subalgebra~\(K\) is closed if and only if \(H\sir H/K^+H\)
is a \(K\)-Galois coextension (see Definition~\ref{defi:coGalois}). This gives
an answer to the question when the bijective correspondence between
generalised quotients over which~\(H\) is faithfully coflat and coideal
subalgebra over which~\(H\) is faithfully flat holds without (co)flatness
assumptions. In the last section we extend the characterisation of closed
subalgebras and closed generalised quotients to crossed products.
( X
\section{Preliminaries}\label{subsec:basics}"""
bracket_dict = {'[': ']', '(': ')', '{': '}', '\\lceil': '\\rceil', '\\begin': '\\end', '\\lfloor': '\\rfloor', '\\langle': '\\rangle'}
# bracket_dict = {'[': ']', '(': ')', '{': '}', '\\lceil': '\\rceil', '\\begin': '\\end', '\\lfloor': '\\rfloor'}
# bracket_dict = {'[': ']', '(': ')', '{': '}'}
# bracket_dict = {'(': ')'}
for lpos in [ (0,0), (20,10), (30, 10), (40, 10), (50, 10), (60, 3), (61, 44)]:
bpos = byte_pos(real_test_2, *lpos)
nlpos = line_pos(real_test_2, bpos)
if lpos != nlpos:
raise AssertionError('line_pos->byte_pos->line_pos: %s %d %s' % (lpos, bpos, nlpos))
for bpos in [ 0, 100, 1000, 2000, 2500, 3400, 4000, 4280]:
lpos = line_pos(real_test_2, bpos)
nbpos = byte_pos(real_test_2, *lpos)
if bpos != nbpos:
raise AssertionError('byte_pos->line_pos->byte_pos: %d %s %d' % (bpos, lpos, nbpos))
print("-"*10)
print("test_1:")
test = check_bracket(test_1, 2, 0, bracket_dict)
print(test)
if test != (-1, -1, ''):
raise AssertionError('test 1: FAILED')
print("\n"+"-"*10)
print("test_2:")
test = check_bracket(test_2, 2, 0, bracket_dict)
print(test)
if test != (1, 0, '('):
raise AssertionError('test 2: FAILED')
print("\n"+"-"*10)
print("test_3:")
test = check_bracket(test_3, 3, 0, bracket_dict)
print(test)
if test != (2, 0, '('):
raise AssertionError('test 3: FAILED')
print("\n"+"-"*10)
print("test_4:")
test = check_bracket(test_4, 3, 0, bracket_dict)
print(test)
if test != (-1, -1, ''):
raise AssertionError('test 4: FAILED')
print("\n"+"-"*10)
print("test_5:")
test = check_bracket(test_5, 3, 10, bracket_dict)
print(test)
if test != (2, 0, '('):
raise AssertionError('test 5: FAILED')
print("\n"+"-"*10)
print("test_6:")
test = check_bracket(test_6, 2, 0, bracket_dict)
print(test)
# if test[:2] != (-1, -1):
# raise AssertionError('test 6: FAILED')
print("\n"+"-"*10)
print("test_7:")
test = check_bracket(test_7, 7, 0, bracket_dict)
print(test)
if test != (0, 0, '('):
raise AssertionError('test 7: FAILED')
print("\n"+"-"*10)
print("test_8:")
test = check_bracket(test_8, 2, 0, bracket_dict)
print(test)
if test[:2] != (-1, -1):
raise AssertionError('test 8: FAILED')
print("\n"+"-"*10)
print("test_9:")
test = check_bracket(test_9, 2, 0, bracket_dict)
print(test)
if test[:2] != (-1, -1):
raise AssertionError('test 9: FAILED')
print("\n"+"-"*10)
print("real_test_1:")
real_test_1_lines = real_test_1.splitlines()
line = len(real_test_1_lines)-1
col = len(real_test_1_lines[line])
del real_test_1_lines
print(check_bracket(real_test_1, line, col, bracket_dict))
print("\n"+"-"*10)
spos = byte_pos(real_test_2, 30, 10)
print("real_test_2 at %d (30,10)" % spos)
print("line 30: `%s`" % real_test_2.splitlines()[30])
test = check_bracket(real_test_2, 30, 10, bracket_dict)
print(test)
if test[:2] != (-1, -1):
raise AssertionError('real_test_2: FAILED')
print("\n"+"-"*10)
spos = byte_pos(real_test_3, 30, 10)
print("real_test_3 at %d (30,10)" % spos)
print("line 30: `%s`" % real_test_2.splitlines()[30])
test = check_bracket(real_test_3, 30, 10, bracket_dict)
print(test)
if test[:2] != (30, 0):
raise AssertionError('real_test_3: FAILED')
if True:
# speed test
import time
print("\n"+"-"*10)
print("real_test_1 (time test):")
debug = DEBUG
DEBUG = False
times = []
for z in range(100):
stime = time.time()
check_bracket(real_test_1, line, col, bracket_dict)
etime = time.time()
times.append(etime-stime)
del etime
del stime
print(sum(times)/len(times))
DEBUG = debug
"""NOTE:
    The average is ~0.016, atplib#complete#CheckBracket(g:atp_bracketdict)
    over the same paragraphs completes in ~0.15.
"""
debug = DEBUG
DEBUG = False
if True:
print("\n"+"-"*10)
print("real_test_2 (time test):")
# speed test
import time
times = []
for z in range(100):
stime = time.time()
check_bracket(real_test_2, 30, 10, bracket_dict)
etime = time.time()
times.append(etime-stime)
del etime
del stime
print(sum(times)/len(times))
DEBUG = debug
debug = DEBUG
DEBUG = False
if True:
print("\n"+"-"*10)
print("real_test_3 (time test):")
# speed test
import time
times = []
for z in range(100):
stime = time.time()
check_bracket(real_test_3, 30, 10, bracket_dict)
etime = time.time()
times.append(etime-stime)
del etime
del stime
print(sum(times)/len(times))
DEBUG = debug
| unlicense |
shakamunyi/neutron-vrrp | neutron/plugins/ofagent/agent/ports.py | 4 | 2816 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: YAMAMOTO Takashi, VA Linux Systems Japan K.K.
class OFPort(object):
def __init__(self, port_name, ofport):
self.port_name = port_name
self.ofport = ofport
@classmethod
def from_ofp_port(cls, ofp_port):
"""Convert from ryu OFPPort."""
return cls(port_name=ofp_port.name, ofport=ofp_port.port_no)
PORT_NAME_LEN = 14
PORT_NAME_PREFIXES = [
"tap", # common cases, including ovs_use_veth=True
"qvo", # nova hybrid interface driver
"qr-", # l3-agent INTERNAL_DEV_PREFIX (ovs_use_veth=False)
"qg-", # l3-agent EXTERNAL_DEV_PREFIX (ovs_use_veth=False)
]
def _is_neutron_port(name):
"""Return True if the port name looks like a neutron port."""
if len(name) != PORT_NAME_LEN:
return False
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return True
return False
def get_normalized_port_name(interface_id):
"""Convert from neutron device id (uuid) to "normalized" port name.
This needs to be synced with ML2 plugin's _device_to_port_id().
An assumption: The switch uses an OS's interface name as the
corresponding OpenFlow port name.
NOTE(yamamoto): While it's true for Open vSwitch, it isn't
necessarily true everywhere. For example, LINC uses something
like "LogicalSwitch0-Port2".
NOTE(yamamoto): The actual prefix might be different. For example,
with the hybrid interface driver, it's "qvo". However, we always
use "tap" prefix throughout the agent and plugin for simplicity.
Some care should be taken when talking to the switch.
"""
return ("tap" + interface_id)[0:PORT_NAME_LEN]
def _normalize_port_name(name):
"""Normalize port name.
See comments in _get_ofport_name.
"""
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return "tap" + name[len(pref):]
return name
class Port(OFPort):
def is_neutron_port(self):
"""Return True if the port looks like a neutron port."""
return _is_neutron_port(self.port_name)
def normalized_port_name(self):
return _normalize_port_name(self.port_name)
| apache-2.0 |
ds-hwang/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py | 3 | 27322 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
port: an object implementing port-specific functionality
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
'virtual', 'stable', 'http'])
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(args, test_list=self._options.test_list,
fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR) or
self._is_websocket_test(test) or
self.VIRTUAL_HTTP_SUBDIR in test
)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
def _is_websocket_test(self, test):
if self._port.is_wpt_enabled() and self._port.is_wpt_test(test):
return False
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
if not tests_to_run:
return tests_to_run, tests_to_skip
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
might cause some of them to time out."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _rename_results_folder(self):
try:
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
except (IOError, OSError), e:
# It might be possible that results.html was not generated in the previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder.
# Simply overwrite the current folder contents with new results.
import errno
if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
self._printer.write_update("No results.html file found in previous run, skipping it.")
return None
archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
self._filesystem.move(self._results_directory, archived_path)
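# Illustrative sketch (added, not from the original source): with a results
# directory named "layout-test-results" whose results.html was last modified at
# 2015-01-31 12:00:00 local time, the folder is moved aside as
# "layout-test-results_2015-01-31-12-00-00" before the new run writes results.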
def _delete_dirs(self, dir_list):
for dir in dir_list:
self._filesystem.rmtree(dir)
def _limit_archived_results_count(self):
results_directory_path = self._filesystem.dirname(self._results_directory)
file_list = self._filesystem.listdir(results_directory_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(results_directory_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
results_directories.sort(key=lambda x: self._filesystem.mtime(x))
self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if self._options.build:
exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
if exit_code:
_log.error("Build check failed")
return exit_code
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
if exit_code:
self._port.stop_helper()
return exit_code
if self._options.clobber_old_results:
self._clobber_old_results()
elif self._filesystem.exists(self._results_directory):
self._limit_archived_results_count()
# Rename the existing results folder for archiving.
self._rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return test_run_results.OK_EXIT_STATUS
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
running_all_tests = False
try:
paths, test_names, running_all_tests = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
# Don't retry failures if an explicit list of tests was passed in.
if self._options.retry_failures is None:
should_retry_failures = len(paths) < len(test_names)
else:
should_retry_failures = self._options.retry_failures
enabled_pixel_tests_in_retry = False
try:
self._start_servers(tests_to_run)
num_workers = self._port.num_workers(int(self._options.child_processes))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
for retry_attempt in xrange(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...' %
(grammar.pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries))
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
finally:
self._stop_servers()
self._clean_up_run()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
_log.debug("summarizing results")
summarized_full_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry)
summarized_failing_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry, only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)' %
(exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
self._options.write_full_results_to)
self._upload_json_files()
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if initial_results.keyboard_interrupted:
exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
self._check_for_stale_w3c_dir()
return test_run_results.RunDetails(
exit_code, summarized_full_results, summarized_failing_results,
initial_results, all_retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _check_for_stale_w3c_dir(self):
# TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
# Remove the check in port/base.py as well.
fs = self._port.host.filesystem
layout_tests_dir = self._port.layout_tests_dir()
if fs.isdir(fs.join(layout_tests_dir, 'w3c')):
_log.warning('WARNING: You still have the old LayoutTests/w3c directory in your checkout. You should delete it!')
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
if failure.has_log:
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
dir_above_results_path = self._filesystem.dirname(self._results_directory)
self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
if not self._filesystem.exists(dir_above_results_path):
return
file_list = self._filesystem.listdir(dir_above_results_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(dir_above_results_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
self._delete_dirs(results_directories)
# Port specific clean-up.
self._port.clobber_old_port_specific_results()
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
# We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
_log.debug("Finished writing JSON files.")
def _upload_json_files(self):
if not self._options.test_results_server:
return
if not self._options.master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
attrs = [("builder", self._options.builder_name),
("testtype", "webkit_tests"),
("master", self._options.master_name)]
files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
if response:
if response.code == 200:
_log.debug("JSON uploaded.")
else:
_log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
else:
_log.error("JSON upload failed; no response returned")
except Exception, err:
_log.error("Upload failed: %s" % err)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
| bsd-3-clause |
cherub0526/codecombat | scripts/devSetup/downloader.py | 70 | 1628 | from __future__ import print_function
__author__ = 'schmatz'
from configuration import Configuration
import sys
if sys.version_info.major < 3:
import urllib
else:
import urllib.request as urllib
from dependency import Dependency
class Downloader:
def __init__(self,dependency):
assert isinstance(dependency, Dependency)
self.dependency = dependency
@property
def download_directory(self):
raise NotImplementedError
def download(self):
raise NotImplementedError
def download_file(self,url,filePath):
urllib.urlretrieve(url,filePath,self.__progress_bar_reporthook)
def decompress(self):
raise NotImplementedError
def check_download(self):
raise NotImplementedError
def __progress_bar_reporthook(self,blocknum,blocksize,totalsize):
#http://stackoverflow.com/a/13895723/1928667
#http://stackoverflow.com/a/3173331/1928667
bars_to_display = 70
amount_of_data_downloaded_so_far = blocknum * blocksize
if totalsize > 0:
progress_fraction = float(amount_of_data_downloaded_so_far) / float(totalsize)
progress_percentage = progress_fraction * 1e2
stringToDisplay = '\r[{0}] {1:.1f}%'.format('#'*int(bars_to_display*progress_fraction),progress_percentage)
print(stringToDisplay,end=' ')
if amount_of_data_downloaded_so_far >= totalsize:
print("\n",end=' ')
else:
stringToDisplay = '\r File size unknown. Read {0} bytes.'.format(amount_of_data_downloaded_so_far)
print(stringToDisplay,end=' ')
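# Illustrative sketch (added, not from the original source): the arithmetic the
# progress hook above performs, for a hypothetical 81920-byte download reported
# in 8192-byte blocks. The _example_* helper exists only for illustration.
def _example_progress_line(blocknum=5, blocksize=8192, totalsize=81920):
    downloaded = blocknum * blocksize                 # 40960 bytes read so far
    fraction = float(downloaded) / float(totalsize)   # 0.5
    # 35 of 70 '#' characters followed by "50.0%"
    return '[{0}] {1:.1f}%'.format('#' * int(70 * fraction), fraction * 1e2)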
| mit |
xforce/jc3-handling-editor | tools/gyp/test/mac/gyptest-xcode-gcc.py | 15 | 1838 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xcode-style GCC_... settings are handled properly.
"""
import TestGyp
import os
import subprocess
import sys
def IgnoreOutput(string, expected_string):
return True
def CompilerVersion(compiler):
stdout = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT)
return stdout.rstrip('\n')
def CompilerSupportsWarnAboutInvalidOffsetOfMacro(test):
# "clang" does not support the "-Winvalid-offsetof" flag, and silently
# ignore it. Starting with Xcode 5.0.0, "gcc" is just a "clang" binary with
# some hard-coded include path hack, so use the output of "-v" to detect if
# the compiler supports the flag or not.
return 'clang' not in CompilerVersion('/usr/bin/cc')
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
if test.format == 'xcode-ninja':
test.skip_test()
CHDIR = 'xcode-gcc'
test.run_gyp('test.gyp', chdir=CHDIR)
# List of targets that'll pass. It expects targets of the same name with
# '-fail' appended that'll fail to build.
targets = [
'warn_about_missing_newline',
]
# clang doesn't warn on invalid offsetofs; it silently ignores
# -Wno-invalid-offsetof.
if CompilerSupportsWarnAboutInvalidOffsetOfMacro(test):
targets.append('warn_about_invalid_offsetof_macro')
for target in targets:
test.build('test.gyp', target, chdir=CHDIR)
test.built_file_must_exist(target, chdir=CHDIR)
fail_target = target + '-fail'
test.build('test.gyp', fail_target, chdir=CHDIR, status=None,
stderr=None, match=IgnoreOutput)
test.built_file_must_not_exist(fail_target, chdir=CHDIR)
test.pass_test()
| mit |
kcrisman/git-trac-command | git_trac/py26_compat.py | 1 | 4359 | """
Python 2.6 hacks
"""
import sys
import subprocess
########################################################################################
def check_output(*popenargs, **kwargs):
"""
Emulation of check_output
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
########################################################################################
# Backport of importlib.import_module from 3.x.
#
# Code from: http://code.activestate.com/recipes/576685/
def total_ordering(cls):
"""
Backport to work with Python 2.6
Class decorator that fills in missing ordering methods
"""
convert = {
'__lt__': [
(
'__gt__',
lambda self, other: not (self < other or self == other)
),
(
'__le__',
lambda self, other: self < other or self == other
),
(
'__ge__',
lambda self, other: not self < other
)],
'__le__': [
(
'__ge__',
lambda self, other: not self <= other or self == other
),
(
'__lt__',
lambda self, other: self <= other and not self == other
),
(
'__gt__',
lambda self, other: not self <= other
)],
'__gt__': [
(
'__lt__',
lambda self, other: not (self > other or self == other)
),
(
'__ge__',
lambda self, other: self > other or self == other
),
(
'__le__',
lambda self, other: not self > other
)],
'__ge__': [
(
'__le__',
lambda self, other: (not self >= other) or self == other
),
(
'__gt__',
lambda self, other: self >= other and not self == other
),
(
'__lt__',
lambda self, other: not self >= other
)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError(
'must define at least one ordering operation: < > <= >='
)
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
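# Example usage (added sketch, not from the original source): a class that
# defines only __lt__ and __eq__ gains the remaining comparisons from the
# decorator, e.g.
#
#   @total_ordering
#   class Version(object):
#       def __init__(self, n):
#           self.n = n
#       def __lt__(self, other):
#           return self.n < other.n
#       def __eq__(self, other):
#           return self.n == other.n
#
#   Version(1) <= Version(2)   # True, synthesized from __lt__ and __eq__
#   Version(3) >= Version(2)   # True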
########################################################################################
# Backport of importlib.import_module from 3.x.
#
# Taken from https://pypi.python.org/pypi/importlib
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
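# Example usage (added sketch, not from the original source); the _example_*
# helper exists only to show how the relative form resolves against a package.
def _example_import_module():
    json_mod = import_module('json')              # absolute import
    decoder = import_module('.decoder', 'json')   # relative, resolves to 'json.decoder'
    return json_mod, decoder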
| gpl-3.0 |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py | 70 | 14387 | """
A module providing some utility functions regarding bezier path manipulation.
"""
import numpy as np
from math import sqrt
from matplotlib.path import Path
from operator import xor
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a*d-b*c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_* line1_rhs + b_ * line2_rhs
y = c_* line1_rhs + d_ * line2_rhs
return x, y
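# Worked example (added sketch, not from the original source): the x-axis (a
# line through (0, 0) with cos_t=1, sin_t=0) and the vertical line x=1 (through
# (1, 1) with cos_t=0, sin_t=1) intersect at (1, 0). The _example_* helper is
# illustrative only.
def _example_get_intersection():
    x, y = get_intersection(0., 0., 1., 0., 1., 1., 0., 1.)
    assert abs(x - 1.) < 1e-12 and abs(y) < 1e-12
    return x, y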
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
For a line passing through (*cx*, *cy*) and having an angle *t*,
return locations of the two points located along its perpendicular line at the distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1-t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
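# Worked example (added sketch, not from the original source): splitting the
# quadratic segment with control points (0, 0), (1, 2), (2, 0) at t=0.5 gives
# left controls (0, 0), (0.5, 1), (1, 1) and right controls (1, 1), (1.5, 1),
# (2, 0); the shared point (1, 1) is the curve point at t=0.5.
def _example_split_quadratic():
    left, right = split_de_casteljau([(0., 0.), (1., 2.), (2., 0.)], 0.5)
    return left, right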
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise ValueError("the segment does not seemed to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2:
return t0, t1
# calculate the middle point
middle_t = 0.5*(t0+t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
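# Usage sketch (added, not from the original source): bracket the parameter at
# which the straight segment from (0, 0) to (1, 0) leaves the circle of radius
# 0.5 centered at the origin. Uses inside_circle, defined further below; the
# _example_* helper is illustrative only.
def _example_find_crossing():
    point_at_t = lambda t: (t, 0.)
    # returns (t0, t1) bracketing the crossing near t = 0.5
    return find_bezier_t_intersecting_with_closedpath(
        point_at_t, inside_circle(0., 0., 0.5))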
class BezierSegment:
"""
A simple class of a 2-dimensional bezier segment
"""
# Higher order bezier lines can be supported by simply adding
# corresponding values.
_binom_coeff = {1:np.array([1., 1.]),
2:np.array([1., 2., 1.]),
3:np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
*control_points* : location of control points. It needs to have a
shape of n * 2, where n is the order of the bezier line. 1<=
n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:,0]
yy = _control_points[:,1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1.-t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
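# Worked example (added sketch, not from the original source): evaluating the
# same quadratic segment at t=0.5 reproduces the shared split point (1, 1) from
# the de Casteljau example above.
def _example_point_at_t():
    return BezierSegment([(0., 0.), (1., 2.), (2., 0.)]).point_at_t(0.5)  # (1.0, 1.0)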
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0+t1)/2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
which it intersects with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t*r + cx, sin_t*r + cy
return find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax, tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = path_iter.next()
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold=0
i = 1
for ctl_points, command in path_iter:
iold=i
i += len(ctl_points)/2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = zip(bezier_path[::2], bezier_path[1::2])
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r**2
def _f(xy):
x, y = xy
return (x-cx)**2 + (y-cy)**2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1-x0, y1-y0
d = (dx*dx + dy*dy)**.5
return dx/d, dy/d
def get_parallels(bezier2, width):
"""
Given the quadratic bezier control points *bezier2*, returns
control points of quadratic bezier lines roughly parallel to given
one separated by *width*.
"""
# The parallel bezier lines constructed by following ways.
# c1 and c2 are contol points representing the begin and end of the bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
# t1 and t2 are the angles between c1 and cm, and between cm and c2.
# They are also the angles of the tangential lines of the path at c1 and c2.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
# through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
# find cm_left which is the intersecting point of a line through
# c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
# the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]
return path_left, path_right
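# Usage sketch (added, not from the original source): offset the bent quadratic
# (0, 0), (1, 1), (2, 0) by 0.1 on each side; the two returned control-point
# lists describe curves running roughly parallel to the input, one on its left
# and one on its right.
def _example_get_parallels():
    return get_parallels([(0., 0.), (1., 1.), (2., 0.)], 0.1)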
def make_wedged_bezier2(bezier2, length, shrink_factor=0.5):
"""
Being similar to get_parallels, returns
control points of two quadratic bezier lines having a width roughly parallel to given
one separated by *width*.
"""
xx1, yy1 = bezier2[2]
xx2, yy2 = bezier2[1]
xx3, yy3 = bezier2[0]
cx, cy = xx3, yy3
x0, y0 = xx2, yy2
dist = sqrt((x0-cx)**2 + (y0-cy)**2)
cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,
x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)
xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2.,
xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2.,
dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2)
cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,
xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)
l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]
l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]
return l_plus, l_minus
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.
"""
cmx = .5 * (4*mmx - (c1x + c2x))
cmy = .5 * (4*mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
Being similar to get_parallels, returns
control points of two quadratic bezier lines having a width roughly parallel to given
one separated by *width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
# t1 and t2 are the angles between c1 and cm, and between cm and c3.
# They are also the angles of the tangential lines of the path at c1 and c3.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
# through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23
c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5
c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5
c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
if 0:
path = Path([(0, 0), (1, 0), (2, 2)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
left, right = divide_path_inout(path, inside)
clf()
ax = gca()
| gpl-3.0 |
shaunstanislaus/differential-line | main_collapse.py | 3 | 2402 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy import pi, zeros, linspace, cos, sin
from numpy.random import random, seed
from modules.growth import spawn, spawn_curl
NMAX = 10**7
SIZE = 2000
ONE = 1./SIZE
STP = ONE*0.01
NEARL = 3*ONE
FARL = 150*ONE
FREEZE_DISTANCE = ONE*5
PROCS = 4
MID = 0.5
LINEWIDTH = 5.*ONE
STEPS_ITT = 1000
INIT_NUM = 200
BACK = [1,1,1,1]
FRONT = [0,0,0,1]
TWOPI = pi*2.
ZONEWIDTH = 2.*FARL/ONE
NZ = int(SIZE/ZONEWIDTH)
print 'NZ', NZ
print 'ZONEWIDTH', ZONEWIDTH
np_coords = zeros(shape=(NMAX,4), dtype='float')
def steps(df,steps_itt):
from math import ceil
from modules.growth import collapse
for i in xrange(steps_itt):
active_num = df.get_active_vertex_count()
print(active_num)
if active_num<1:
rad = df.get_greatest_distance(MID,MID) + FREEZE_DISTANCE*3
circ = rad*4*3.14
nodes = ceil(circ/NEARL)
print(rad, nodes)
angles = sorted(random(nodes)*TWOPI)
df.init_circle_segment(MID,MID, rad, angles)
collapse(df, NEARL*0.9, 0.1)
df.split_long_edges(NEARL*3)
df.optimize_contract(STP, FREEZE_DISTANCE)
def main():
from time import time
from itertools import count
from render.render import Render
from modules.helpers import print_stats
from modules.show import show
from differentialLine import DifferentialLine
DF = DifferentialLine(NMAX, NZ, NEARL, FARL, PROCS)
render = Render(SIZE, BACK, FRONT)
render.ctx.set_source_rgba(*FRONT)
render.ctx.set_line_width(LINEWIDTH)
#angles = sorted(random(INIT_NUM)*TWOPI)
#DF.init_passive_circle_segment(MID,MID,100*ONE, angles)
angles = sorted(random(INIT_NUM)*pi*5/8)
xys = []
for a in angles:
x = 0.5 + cos(a)*0.01
y = 0.5 + sin(a)*0.01
xys.append((x,y))
DF.init_passive_line_segment(xys)
for i in count():
t_start = time()
steps(DF,STEPS_ITT)
t_stop = time()
print_stats(i*STEPS_ITT,t_stop-t_start,DF)
fn = './res/collapse_e_{:010d}.png'.format(i*STEPS_ITT)
num = DF.np_get_edges_coordinates(np_coords)
show(render,np_coords[:num,:],fn,ONE)
if __name__ == '__main__':
if False:
import pyximport
pyximport.install()
import pstats, cProfile
fn = './profile/profile'
cProfile.runctx("main()", globals(), locals(), fn)
p = pstats.Stats(fn)
p.strip_dirs().sort_stats('cumulative').print_stats()
else:
main()
| mit |
soltanmm-google/grpc | tools/buildgen/plugins/expand_bin_attrs.py | 31 | 2515 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Buildgen expand binary attributes plugin.
This fills in any optional attributes.
"""
def mako_plugin(dictionary):
"""The exported plugin code for expand_filegroups.
The list of libs in the build.yaml file can contain "filegroups" tags.
These refer to the filegroups in the root object. We will expand and
merge filegroups on the src, headers and public_headers properties.
"""
targets = dictionary.get('targets')
default_platforms = ['windows', 'posix', 'linux', 'mac']
for tgt in targets:
tgt['flaky'] = tgt.get('flaky', False)
tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
tgt['boringssl'] = tgt.get('boringssl', False)
tgt['zlib'] = tgt.get('zlib', False)
tgt['gtest'] = tgt.get('gtest', False)
libs = dictionary.get('libs')
for lib in libs:
lib['boringssl'] = lib.get('boringssl', False)
lib['zlib'] = lib.get('zlib', False)
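# Example (added sketch, not from the original plugin): a minimal build
# dictionary before and after the plugin runs; unset attributes get their
# defaults and the platform lists are sorted. The _example_* helper and the
# 't1'/'grpc' names are illustrative only.
def _example_mako_plugin():
    d = {'targets': [{'name': 't1', 'platforms': ['windows', 'linux']}],
         'libs': [{'name': 'grpc'}]}
    mako_plugin(d)
    # d['targets'][0] now also has flaky=False, boringssl=False, zlib=False,
    # gtest=False, platforms=['linux', 'windows'] and matching ci_platforms;
    # d['libs'][0] gains boringssl=False and zlib=False.
    return d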
| bsd-3-clause |
minrk/jupyter | docs/source/development_guide/template.py | 5 | 1524 | """A one-line description.
A longer description that spans multiple lines. Explain the purpose of the
file and provide a short list of the key classes/functions it contains. This
is the docstring shown when someone does 'import foo;foo?' in IPython, so it
should be reasonably useful and informative.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
# [remove this comment in production]
#
# List all imports, sorted within each section (stdlib/third-party/ipython).
# For 'import foo', use one import per line. For 'from foo.bar import a, b, c'
# it's OK to import multiple items, use the parenthesized syntax 'from foo
# import (a, b, ...)' if the list needs multiple lines.
# Separate stdlib, third-party, and IPython imports by a blank line.
# [remove this comment in production]
#
# If a file is large and has many sections, you may want to use broad section
# headers like this one that make it easier to navigate the file,
# with descriptive titles. For complex classes, similar (but indented)
# headers are useful to organize the internal class structure.
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
| bsd-3-clause |
mcus/SickRage | sickbeard/providers/scenetime.py | 3 | 6631 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import urllib
import traceback
import logging
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.bs4_parser import BS4Parser
class SceneTimeProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "SceneTime")
self.supportsBacklog = True
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = SceneTimeCache(self)
self.urls = {'base_url': 'https://www.scenetime.com',
'login': 'https://www.scenetime.com/takelogin.php',
'detail': 'https://www.scenetime.com/details.php?id=%s',
'search': 'https://www.scenetime.com/browse.php?search=%s%s',
'download': 'https://www.scenetime.com/download.php/%s/%s'}
self.url = self.urls[b'base_url']
self.categories = "&c2=1&c43=13&c9=1&c63=1&c77=1&c79=1&c100=1&c101=1"
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password}
response = self.getURL(self.urls[b'login'], post_data=login_params, timeout=30)
if not response:
logging.warning("Unable to connect to provider")
return False
if re.search('Username or password incorrect', response):
logging.warning("Invalid username or password. Check your settings")
return False
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_params.keys():
logging.debug("Search Mode: %s" % mode)
for search_string in search_params[mode]:
if mode is not 'RSS':
logging.debug("Search string: %s " % search_string)
searchURL = self.urls[b'search'] % (urllib.quote(search_string), self.categories)
logging.debug("Search URL: %s" % searchURL)
data = self.getURL(searchURL)
if not data:
continue
try:
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
torrent_table = html.select("#torrenttable table")
torrent_rows = torrent_table[0].select("tr") if torrent_table else []
# Continue only if one Release is found
if len(torrent_rows) < 2:
logging.debug("Data returned from provider does not contain any torrents")
continue
# Scenetime apparently uses different number of cells in #torrenttable based
# on who you are. This works around that by extracting labels from the first
# <tr> and using their index to find the correct download/seeders/leechers td.
labels = [label.get_text() for label in torrent_rows[0].find_all('td')]
for result in torrent_rows[1:]:
cells = result.find_all('td')
link = cells[labels.index('Name')].find('a')
full_id = link[b'href'].replace('details.php?id=', '')
torrent_id = full_id.split("&")[0]
try:
title = link.contents[0].get_text()
filename = "%s.torrent" % title.replace(" ", ".")
download_url = self.urls[b'download'] % (torrent_id, filename)
seeders = int(cells[labels.index('Seeders')].get_text())
leechers = int(cells[labels.index('Leechers')].get_text())
# FIXME
size = -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode is not 'RSS':
logging.debug(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
title, seeders, leechers))
continue
item = title, download_url, size, seeders, leechers
if mode is not 'RSS':
logging.debug("Found result: %s " % title)
items[mode].append(item)
except Exception as e:
logging.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class SceneTimeCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll SceneTime every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = SceneTimeProvider()
| gpl-3.0 |
hassaanaliw/flaskbook | app/helpers.py | 1 | 1376 | __author__ = 'hassaanaliw'
'''
Includes several helper functions for the main app that are used a number of
times to avoid using code multiple times.
'''
from app.posts.models import Posts
from app.user.models import User
class Messages():
LOGIN_ERROR_MESSAGE = "Email/Password is Wrong. Please Try Again."
LOGIN_SUCCESSFUL_MESSAGE = "Logged in Successfully."
REGISTER_EMAIL_EMPTY_MESSAGE = "The Email Field Cannot be Empty."
REGISTER_PASSWORD_EMPTY_MESSAGE = "The Password Field Cannot be Empty."
REGISTER_EMAIL_EXISTS_MESSAGE = "An Account is already registered with this Email."
REGISTER_USERNAME_EXISTS_MESSAGE = "An Account is already registered with this Username."
def get_self_posts(user_id):
"""
Returns Posts submitted by current user.
"""
posts = Posts.query.filter_by(user_id=user_id).order_by(Posts.pub_date.desc()).all()
return posts
def get_following_posts(user_id):
"""
Returns Posts submitted by people the user follows.
"""
user = User.query.get(user_id)
if not user or not user.following:
return []
friends = user.following.split(',')
# Postgres doesn't play well with empty elements. Raises sqlalchemy.exc.DataError exception
friends.remove(u'')
posts = Posts.query.filter(Posts.user_id.in_(friends)).order_by(Posts.pub_date.desc()).all()
return posts
| mit |
Gateworks/platform-external-chromium_org | tools/gdb/gdb_chrome.py | 23 | 10110 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GDB support for Chrome types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import gdb_chrome
end
This module relies on the WebKit gdb module already existing in
your Python path.
Use
(gdb) p /r any_variable
to print |any_variable| without using any printers.
"""
import datetime
import gdb
import gdb.printing
import webkit
# When debugging this module, set the below variable to True, and then use
# (gdb) python del sys.modules['gdb_chrome']
# (gdb) python import gdb_chrome
# to reload.
_DEBUGGING = False
pp_set = gdb.printing.RegexpCollectionPrettyPrinter("chromium")
def typed_ptr(ptr):
"""Prints a pointer along with its exact type.
By default, gdb would print just the address, which takes more
steps to interpret.
"""
# Returning this as a cast expression surrounded by parentheses
# makes it easier to cut+paste inside of gdb.
return '((%s)%s)' % (ptr.dynamic_type, ptr)
class Printer(object):
def __init__(self, val):
self.val = val
class StringPrinter(Printer):
def display_hint(self):
return 'string'
class String16Printer(StringPrinter):
def to_string(self):
return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p'])
pp_set.add_printer(
'string16',
'^string16|std::basic_string<(unsigned short|char16|base::char16).*>$',
String16Printer);
class GURLPrinter(StringPrinter):
def to_string(self):
return self.val['spec_']
pp_set.add_printer('GURL', '^GURL$', GURLPrinter)
class FilePathPrinter(StringPrinter):
def to_string(self):
return self.val['path_']['_M_dataplus']['_M_p']
pp_set.add_printer('FilePath', '^FilePath$', FilePathPrinter)
class SizePrinter(Printer):
def to_string(self):
return '%sx%s' % (self.val['width_'], self.val['height_'])
pp_set.add_printer('gfx::Size', '^gfx::(Size|SizeF|SizeBase<.*>)$', SizePrinter)
class PointPrinter(Printer):
def to_string(self):
return '%s,%s' % (self.val['x_'], self.val['y_'])
pp_set.add_printer('gfx::Point', '^gfx::(Point|PointF|PointBase<.*>)$',
PointPrinter)
class RectPrinter(Printer):
def to_string(self):
return '%s %s' % (self.val['origin_'], self.val['size_'])
pp_set.add_printer('gfx::Rect', '^gfx::(Rect|RectF|RectBase<.*>)$',
RectPrinter)
class SmartPtrPrinter(Printer):
def to_string(self):
return '%s%s' % (self.typename, typed_ptr(self.ptr()))
class ScopedRefPtrPrinter(SmartPtrPrinter):
typename = 'scoped_refptr'
def ptr(self):
return self.val['ptr_']
pp_set.add_printer('scoped_refptr', '^scoped_refptr<.*>$', ScopedRefPtrPrinter)
class LinkedPtrPrinter(SmartPtrPrinter):
typename = 'linked_ptr'
def ptr(self):
return self.val['value_']
pp_set.add_printer('linked_ptr', '^linked_ptr<.*>$', LinkedPtrPrinter)
class WeakPtrPrinter(SmartPtrPrinter):
typename = 'base::WeakPtr'
def ptr(self):
flag = ScopedRefPtrPrinter(self.val['ref_']['flag_']).ptr()
if flag and flag['is_valid_']:
return self.val['ptr_']
return gdb.Value(0).cast(self.val['ptr_'].type)
pp_set.add_printer('base::WeakPtr', '^base::WeakPtr<.*>$', WeakPtrPrinter)
class CallbackPrinter(Printer):
"""Callbacks provide no usable information so reduce the space they take."""
def to_string(self):
return '...'
pp_set.add_printer('base::Callback', '^base::Callback<.*>$', CallbackPrinter)
class LocationPrinter(Printer):
def to_string(self):
return '%s()@%s:%s' % (self.val['function_name_'].string(),
self.val['file_name_'].string(),
self.val['line_number_'])
pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$',
LocationPrinter)
class LockPrinter(Printer):
def to_string(self):
try:
if self.val['owned_by_thread_']:
return 'Locked by thread %s' % self.val['owning_thread_id_']
else:
return 'Unlocked'
except gdb.error:
return 'Unknown state'
pp_set.add_printer('base::Lock', '^base::Lock$', LockPrinter)
class TimeDeltaPrinter(object):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['delta_']))
def timedelta(self):
return self._timedelta
def to_string(self):
return str(self._timedelta)
pp_set.add_printer('base::TimeDelta', '^base::TimeDelta$', TimeDeltaPrinter)
class TimeTicksPrinter(TimeDeltaPrinter):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['ticks_']))
pp_set.add_printer('base::TimeTicks', '^base::TimeTicks$', TimeTicksPrinter)
class TimePrinter(object):
def __init__(self, val):
timet_offset = gdb.parse_and_eval(
'base::Time::kTimeTToMicrosecondsOffset')
self._datetime = (datetime.datetime.fromtimestamp(0) +
datetime.timedelta(microseconds=
int(val['us_'] - timet_offset)))
def datetime(self):
return self._datetime
def to_string(self):
return str(self._datetime)
pp_set.add_printer('base::Time', '^base::Time$', TimePrinter)
class IpcMessagePrinter(Printer):
def header(self):
return self.val['header_'].cast(
gdb.lookup_type('IPC::Message::Header').pointer())
def to_string(self):
message_type = self.header()['type']
return '%s of kind %s line %s' % (
self.val.dynamic_type,
(message_type >> 16).cast(gdb.lookup_type('IPCMessageStart')),
message_type & 0xffff)
def children(self):
yield ('header_', self.header().dereference())
yield ('capacity_', self.val['capacity_'])
yield ('variable_buffer_offset_', self.val['variable_buffer_offset_'])
for field in self.val.type.fields():
if field.is_base_class:
continue
yield (field.name, self.val[field.name])
pp_set.add_printer('IPC::Message', '^IPC::Message$', IpcMessagePrinter)
class NotificationRegistrarPrinter(Printer):
def to_string(self):
try:
registrations = self.val['registered_']
vector_finish = registrations['_M_impl']['_M_finish']
vector_start = registrations['_M_impl']['_M_start']
if vector_start == vector_finish:
return 'Not watching notifications'
if vector_start.dereference().type.sizeof == 0:
# Incomplete type: b/8242773
return 'Watching some notifications'
return ('Watching %s notifications; '
'print %s->registered_ for details') % (
int(vector_finish - vector_start),
typed_ptr(self.val.address))
except gdb.error:
return 'NotificationRegistrar'
pp_set.add_printer('content::NotificationRegistrar',
'^content::NotificationRegistrar$',
NotificationRegistrarPrinter)
class SiteInstanceImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
return 'SiteInstanceImpl@%s for %s' % (
self.val.address, self.val['site_'])
def children(self):
yield ('id_', self.val['id_'])
yield ('has_site_', self.val['has_site_'])
if self.val['browsing_instance_']['ptr_']:
yield ('browsing_instance_', self.val['browsing_instance_']['ptr_'])
if self.val['process_']:
yield ('process_', typed_ptr(self.val['process_']))
if self.val['render_process_host_factory_']:
yield ('render_process_host_factory_',
self.val['render_process_host_factory_'])
pp_set.add_printer('content::SiteInstanceImpl', '^content::SiteInstanceImpl$',
SiteInstanceImplPrinter)
class RenderProcessHostImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
pid = ''
try:
child_process_launcher_ptr = (
self.val['child_process_launcher_']['impl_']['data_']['ptr'])
if child_process_launcher_ptr:
context = (child_process_launcher_ptr['context_']['ptr_'])
if context:
pid = ' PID %s' % str(context['process_']['process_'])
except gdb.error:
# The definition of the Context type may not be available.
# b/8242773
pass
return 'RenderProcessHostImpl@%s%s' % (self.val.address, pid)
def children(self):
yield ('id_', self.val['id_'])
yield ('render_widget_hosts_',
self.val['render_widget_hosts_']['data_'])
yield ('fast_shutdown_started_', self.val['fast_shutdown_started_'])
yield ('deleting_soon_', self.val['deleting_soon_'])
yield ('pending_views_', self.val['pending_views_'])
yield ('visible_widgets_', self.val['visible_widgets_'])
yield ('backgrounded_', self.val['backgrounded_'])
yield ('widget_helper_', self.val['widget_helper_'])
yield ('is_initialized_', self.val['is_initialized_'])
yield ('browser_context_', typed_ptr(self.val['browser_context_']))
yield ('sudden_termination_allowed_',
self.val['sudden_termination_allowed_'])
yield ('ignore_input_events_', self.val['ignore_input_events_'])
yield ('is_guest_', self.val['is_guest_'])
pp_set.add_printer('content::RenderProcessHostImpl',
'^content::RenderProcessHostImpl$',
RenderProcessHostImplPrinter)
gdb.printing.register_pretty_printer(gdb, pp_set, replace=_DEBUGGING)
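# Usage sketch (hedged: the file path and the printed variable below are made up;
# only the output format follows the printers defined above, assuming nested
# values also render through PointPrinter/SizePrinter):
#
#   (gdb) source /path/to/this_printer_module.py
#   (gdb) print rect                 # a gfx::Rect value
#   $1 = 10,20 300x200               # RectPrinter -> "<origin> <size>", i.e. "x,y WxH"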
| bsd-3-clause |
lamby/live-studio | contrib/django_extensions/management/commands/export_emails.py | 22 | 4801 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
from optparse import make_option
from sys import stdout
from csv import writer
FORMATS = [
'address',
'google',
'outlook',
'linkedin',
'vcard',
]
def full_name(first_name, last_name, username, **extra):
name = u" ".join(n for n in [first_name, last_name] if n)
if not name:
return username
return name
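# Example of the fallback behaviour of full_name() (illustrative values only;
# extra keyword arguments such as the email column are swallowed by **extra):
#
#   full_name(first_name=u'Jane', last_name=u'Doe', username='jdoe')  # -> u'Jane Doe'
#   full_name(first_name=u'', last_name=u'', username='jdoe')         # -> 'jdoe'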
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--group', '-g', action='store', dest='group', default=None,
help='Limit to users which are part of the supplied group name'),
make_option('--format', '-f', action='store', dest='format', default=FORMATS[0],
help="output format. May be one of '" + "', '".join(FORMATS) + "'."),
)
help = ("Export user email address list in one of a number of formats.")
args = "[output file]"
label = 'filename to save to'
requires_model_validation = True
can_import_settings = True
encoding = 'utf-8' # RED_FLAG: add as an option -DougN
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
group = options['group']
if group and not Group.objects.filter(name=group).count() == 1:
names = u"', '".join(g['name'] for g in Group.objects.values('name')).encode('utf-8')
if names:
names = "'" + names + "'."
raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
if len(args) and args[0] != '-':
outfile = file(args[0], 'w')
else:
outfile = stdout
qs = User.objects.all().order_by('last_name', 'first_name', 'username', 'email')
if group:
qs = qs.filter(group__name=group).distinct()
qs = qs.values('last_name', 'first_name', 'username', 'email')
getattr(self, options['format'])(qs, outfile)
def address(self, qs, out):
"""simple single entry per line in the format of:
"full name" <[email protected]>;
"""
out.write(u"\n".join(u'"%s" <%s>;' % (full_name(**ent), ent['email'])
for ent in qs).encode(self.encoding))
out.write("\n")
def google(self, qs, out):
"""CSV format suitable for importing into google GMail
"""
csvf = writer(out)
csvf.writerow(['Name', 'Email'])
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)])
def outlook(self, qs, out):
"""CSV format suitable for importing into outlook
"""
csvf = writer(out)
columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
csvf.writerow(columns)
empty = [''] * (len(columns) - 2)
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)] + empty)
def linkedin(self, qs, out):
"""CSV format suitable for importing into linkedin Groups.
perfect for pre-approving members of a linkedin group.
"""
csvf = writer(out)
csvf.writerow(['First Name', 'Last Name', 'Email'])
for ent in qs:
csvf.writerow([ent['first_name'].encode(self.encoding),
ent['last_name'].encode(self.encoding),
ent['email'].encode(self.encoding)])
def vcard(self, qs, out):
try:
import vobject
except ImportError:
print self.style.ERROR("Please install python-vobject to use the vcard export format.")
import sys
sys.exit(1)
for ent in qs:
card = vobject.vCard()
card.add('fn').value = full_name(**ent)
if not ent['last_name'] and not ent['first_name']:
# fallback to fullname, if both first and lastname are not declared
card.add('n').value = vobject.vcard.Name(full_name(**ent))
else:
card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
emailpart = card.add('email')
emailpart.value = ent['email']
emailpart.type_param = 'INTERNET'
out.write(card.serialize().encode(self.encoding))
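# Usage sketch for this management command (group and file names are
# illustrative, not taken from any project using this app):
#
#   ./manage.py export_emails                        # "address" format to stdout
#   ./manage.py export_emails -f google gmail.csv    # Google CSV written to gmail.csv
#   ./manage.py export_emails -g staff -f vcard staff.vcf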
| agpl-3.0 |
jainanisha90/WeVoteServer | support_oppose_deciding/views.py | 1 | 6621 | # support_oppose_deciding/views.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.http import JsonResponse
from django_user_agents.utils import get_user_agent
from position.models import PositionManager
from voter.models import fetch_voter_id_from_voter_device_link
import wevote_functions.admin
from wevote_functions.functions import get_voter_api_device_id
logger = wevote_functions.admin.get_logger(__name__)
def voter_supporting_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_supporting_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
position_manager = PositionManager()
results = position_manager.toggle_on_voter_support_for_candidate_campaign(voter_id, candidate_campaign_id,
user_agent_string, user_agent_object)
if results['success']:
return JsonResponse({0: "success"})
else:
return JsonResponse({0: "failure"})
def voter_stop_supporting_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_stop_supporting_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
position_manager = PositionManager()
results = position_manager.toggle_off_voter_support_for_candidate_campaign(voter_id, candidate_campaign_id,
user_agent_string, user_agent_object)
if results['success']:
return JsonResponse({0: "success"})
else:
return JsonResponse({0: "failure"})
def voter_opposing_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_opposing_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
position_manager = PositionManager()
results = position_manager.toggle_on_voter_oppose_for_candidate_campaign(voter_id, candidate_campaign_id,
user_agent_string, user_agent_object)
if results['success']:
return JsonResponse({0: "success"})
else:
return JsonResponse({0: "failure"})
def voter_stop_opposing_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_stop_opposing_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
position_manager = PositionManager()
results = position_manager.toggle_off_voter_oppose_for_candidate_campaign(voter_id, candidate_campaign_id,
user_agent_string, user_agent_object)
if results['success']:
return JsonResponse({0: "success"})
else:
return JsonResponse({0: "failure"})
def voter_asking_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_asking_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
logger.debug("voter_asking_candidate_campaign_view NOT BUILT YET, voter_id: {voter_id}".format(
voter_id=voter_id
))
return JsonResponse({0: "not working yet - needs to be built"})
def voter_stop_asking_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_stop_asking_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
logger.debug("voter_stop_asking_candidate_campaign_view NOT BUILT YET, voter_id: {voter_id}".format(
voter_id=voter_id
))
return JsonResponse({0: "not working yet - needs to be built"})
def voter_stance_for_candidate_campaign_view(request, candidate_campaign_id):
logger.debug("voter_stance_for_candidate_campaign_view {candidate_campaign_id}".format(
candidate_campaign_id=candidate_campaign_id
))
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
position_manager = PositionManager()
results = position_manager.retrieve_voter_candidate_campaign_position(voter_id, candidate_campaign_id)
if results['position_found']:
if results['is_support']:
return JsonResponse({0: "support"})
elif results['is_oppose']:
return JsonResponse({0: "oppose"})
elif results['is_no_stance']:
return JsonResponse({0: "no_stance"})
elif results['is_information_only']:
return JsonResponse({0: "information_only"})
elif results['is_still_deciding']:
return JsonResponse({0: "still_deciding"})
return JsonResponse({0: "failure"})
def voter_stance_for_contest_measure_view(request, contest_measure_id):
logger.debug("voter_stance_for_contest_measure_view {contest_measure_id}".format(
contest_measure_id=contest_measure_id
))
voter_api_device_id = get_voter_api_device_id(request)
voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
logger.debug("voter_stance_for_contest_measure_view NOT BUILT YET, voter_id: {voter_id}".format(
voter_id=voter_id
))
return JsonResponse({0: "not working yet - needs to be built"})
| mit |
pbrazdil/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py | 127 | 4570 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Starts a local HTTP server which displays layout test failures (given a test
results directory), provides comparisons of expected and actual results (both
images and text) and allows one-click rebaselining of tests."""
from webkitpy.common import system
from webkitpy.common.net.resultsjsonparser import for_each_test, JSONTestResult
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
class TestConfig(object):
def __init__(self, test_port, layout_tests_directory, results_directory, platforms, filesystem, scm):
self.test_port = test_port
self.layout_tests_directory = layout_tests_directory
self.results_directory = results_directory
self.platforms = platforms
self.filesystem = filesystem
self.scm = scm
class RebaselineServer(AbstractLocalServerCommand):
name = "rebaseline-server"
help_text = __doc__
argument_names = "/path/to/results/directory"
server = RebaselineHTTPServer
def _gather_baselines(self, results_json):
# The rebaseline server and its associated JavaScript expect the tests subtree to
# be key-value pairs instead of hierarchical.
# FIXME: make the rebaseline server use the hierarchical tree.
new_tests_subtree = {}
def gather_baselines_for_test(test_name, result_dict):
result = JSONTestResult(test_name, result_dict)
if result.did_pass_or_run_as_expected():
return
result_dict['state'] = STATE_NEEDS_REBASELINE
result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
new_tests_subtree[test_name] = result_dict
for_each_test(results_json['tests'], gather_baselines_for_test)
results_json['tests'] = new_tests_subtree
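# Sketch of the flattening done above (test names and fields are illustrative):
# a hierarchical results tree such as
#   {'tests': {'fast': {'css': {'a.html': {...}}}}}
# is rewritten as a flat mapping of full test names to result dicts, each one
# annotated with 'state': STATE_NEEDS_REBASELINE and a 'baselines' entry:
#   {'tests': {'fast/css/a.html': {'state': ..., 'baselines': ...}}}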
def _prepare_config(self, options, args, tool):
results_directory = args[0]
filesystem = system.filesystem.FileSystem()
scm = self._tool.scm()
print 'Parsing full_results.json...'
results_json_path = filesystem.join(results_directory, 'full_results.json')
results_json = json_results_generator.load_json(filesystem, results_json_path)
port = tool.port_factory.get()
layout_tests_directory = port.layout_tests_dir()
platforms = filesystem.listdir(filesystem.join(layout_tests_directory, 'platform'))
self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, filesystem, scm)
print 'Gathering current baselines...'
self._gather_baselines(results_json)
return {
'test_config': self._test_config,
"results_json": results_json,
"platforms_json": {
'platforms': platforms,
'defaultPlatform': port.name(),
},
}
| bsd-3-clause |
olivierdalang/stdm | data/pg_utils.py | 1 | 14720 | """
/***************************************************************************
Name : PostgreSQL/PostGIS util functions
Description : Contains generic util functions for accessing the
PostgreSQL/PostGIS STDM database.
Date : 1/April/2014
copyright : (C) 2014 by John Gitau
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import (
QFile,
QIODevice,
QRegExp,
QTextStream
)
from qgis.core import *
from sqlalchemy.sql.expression import text
import stdm.data
from stdm.data import STDMDb, Base
from stdm.utils import (
getIndex,
PLUGIN_DIR
)
# WKBElement is referenced by qgsgeometry_from_wkbelement() below but was not
# imported anywhere in this file; GeoAlchemy2 is assumed to be the provider of
# the spatial element type (it exposes the ST_AsText comparator used there).
from geoalchemy2 import WKBElement
_postGISTables = ["spatial_ref_sys","str_relations","supporting_document"]
_postGISViews = ["geometry_columns","raster_columns","geography_columns","raster_overviews","foreign_key_references"]
_pg_numeric_col_types = ["smallint","integer","bigint","double precision",
"numeric","decimal","real","smallserial","serial",
"bigserial"]
_text_col_types = ["character varying", "text"]
#Flags for specifying data source type
VIEWS = 2500
TABLES = 2501
def spatial_tables(exclude_views=False):
"""
Returns a list of spatial table names in the STDM database.
"""
t = text("select DISTINCT f_table_name from geometry_columns")
result = _execute(t)
spTables = []
views = pg_views()
for r in result:
spTable = r["f_table_name"]
if exclude_views:
tableIndex = getIndex(views,spTable)
if tableIndex == -1:
spTables.append(spTable)
else:
spTables.append(spTable)
return spTables
def pg_tables(schema="public", exclude_lookups=False):
"""
Returns all the tables in the given schema minus the default PostGIS tables.
Views are also excluded. See separate function for retrieving views.
"""
t = text("SELECT table_name FROM information_schema.tables WHERE table_schema = :tschema and table_type = :tbtype " \
"ORDER BY table_name ASC")
result = _execute(t,tschema = schema,tbtype = "BASE TABLE")
pgTables = []
for r in result:
tableName = r["table_name"]
#Remove default PostGIS tables
tableIndex = getIndex(_postGISTables, tableName)
if tableIndex == -1:
if exclude_lookups:
#Validate if table is a lookup table and if it is, then omit
rx = QRegExp("check_*")
rx.setPatternSyntax(QRegExp.Wildcard)
if not rx.exactMatch(tableName):
pgTables.append(tableName)
else:
pgTables.append(tableName)
return pgTables
def pg_views(schema="public"):
"""
Returns the views in the given schema minus the default PostGIS views.
"""
t = text("SELECT table_name FROM information_schema.tables WHERE table_schema = :tschema and table_type = :tbtype " \
"ORDER BY table_name ASC")
result = _execute(t,tschema = schema,tbtype = "VIEW")
pgViews = []
for r in result:
viewName = r["table_name"]
#Remove default PostGIS tables
viewIndex = getIndex(_postGISViews, viewName)
if viewIndex == -1:
pgViews.append(viewName)
return pgViews
def pg_table_exists(table_name, include_views=True, schema="public"):
"""
Checks whether the given table name exists in the current database
connection.
:param table_name: Name of the table or view. If include_views is False
and table_name refers to a view, the result will be False since views are
excluded from the search.
:type table_name: str
:param include_views: True if view names will be also be included in the
search.
:type include_views: bool
:param schema: Schema to search against. Default is "public" schema.
:type schema: str
:return: True if the table or view (if include_views is True) exists in
currently connected database.
:rtype: bool
"""
tables = pg_tables(schema=schema)
if include_views:
tables.extend(pg_views(schema=schema))
if getIndex(tables, table_name) == -1:
return False
else:
return True
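# Usage sketch (the table name is hypothetical, not defined by this module):
#
#   pg_table_exists("party")                        # True if a table or view named "party" exists
#   pg_table_exists("party", include_views=False)   # views are then ignored entirely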
def process_report_filter(tableName,columns,whereStr="",sortStmnt=""):
#Process the report builder filter
sql = "SELECT {0} FROM {1}".format(columns,tableName)
if whereStr != "":
sql += " WHERE {0} ".format(whereStr)
if sortStmnt !="":
sql += sortStmnt
t = text(sql)
return _execute(t)
def table_column_names(tableName, spatialColumns=False):
"""
Returns the column names of the given table name.
If 'spatialColumns' then the function will lookup for spatial columns in the given
table or view.
"""
if spatialColumns:
sql = "select f_geometry_column from geometry_columns where f_table_name = :tbname ORDER BY f_geometry_column ASC"
columnName = "f_geometry_column"
else:
sql = "select column_name from information_schema.columns where table_name = :tbname ORDER BY column_name ASC"
columnName = "column_name"
t = text(sql)
result = _execute(t,tbname = tableName)
columnNames = []
for r in result:
colName = r[columnName]
columnNames.append(colName)
return columnNames
def non_spatial_table_columns(table):
"""
Returns non spatial table columns
Uses list comprehension
"""
all_columns = table_column_names(table)
excluded_columns = [u'id']
spatial_columns = table_column_names(table, True) + excluded_columns
return [x for x in all_columns if x not in spatial_columns]
def delete_table_data(tableName,cascade = True):
"""
Delete all the rows in the target table.
"""
tables = pg_tables()
tableIndex = getIndex(tables, tableName)
if tableIndex != -1:
sql = "DELETE FROM {0}".format(tableName)
if cascade:
sql += " CASCADE"
t = text(sql)
_execute(t)
def geometryType(tableName, spatialColumnName, schemaName = "public"):
"""
Returns a tuple of geometry type and EPSG code of the given column name in the table within the given schema.
"""
sql = "select type,srid from geometry_columns where f_table_name = :tbname and f_geometry_column = :spcolumn and f_table_schema = :tbschema"
t = text(sql)
result = _execute(t,tbname = tableName,spcolumn=spatialColumnName,tbschema=schemaName)
geomType,epsg_code = "", -1
for r in result:
geomType = r["type"]
epsg_code = r["srid"]
break
return (geomType,epsg_code)
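# Usage sketch (table and column names are hypothetical):
#
#   geom_type, srid = geometryType("spatial_unit", "geom_polygon")
#   # e.g. ("POLYGON", 4326) when the column is registered in geometry_columns,
#   # or ("", -1) when no matching row is found.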
def unique_column_values(tableName,columnName,quoteDataTypes=["character varying"]):
"""
Select unique row values in the specified column.
Specify the data types of row values which need to be quoted. Default is varchar.
"""
dataType = columnType(tableName,columnName)
quoteRequired = getIndex(quoteDataTypes, dataType)
sql = "SELECT DISTINCT {0} FROM {1}".format(columnName,tableName)
t = text(sql)
result = _execute(t)
uniqueVals = []
for r in result:
if r[columnName] == None:
if quoteRequired == -1:
uniqueVals.append("NULL")
else:
uniqueVals.append("''")
else:
if quoteRequired == -1:
uniqueVals.append(str(r[columnName]))
else:
uniqueVals.append("'{0}'".format(str(r[columnName])))
return uniqueVals
def columnType(tableName,columnName):
"""
Returns the PostgreSQL data type of the specified column.
"""
sql = "SELECT data_type FROM information_schema.columns where table_name=:tbName AND column_name=:colName"
t = text(sql)
result = _execute(t,tbName = tableName,colName = columnName)
dataType = ""
for r in result:
dataType = r["data_type"]
break
return dataType
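# Usage sketch feeding columns_by_type() below (names are hypothetical):
#
#   columnType("household", "name")                      # -> e.g. 'character varying'
#   columns_by_type("household", ["character varying"])  # -> columns of that type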
def columns_by_type(table, data_types):
"""
:param table: Name of the database table.
:type table: str
:param data_types: List containing matching datatypes that should be
retrieved from the table.
:type data_types: list
:return: Returns those columns of given types from the specified
database table.
:rtype: list
"""
cols = []
table_cols = table_column_names(table)
for tc in table_cols:
col_type = columnType(table, tc)
type_idx = getIndex(data_types, col_type)
if type_idx != -1:
cols.append(tc)
return cols
def numeric_columns(table):
"""
:param table: Name of the database table.
:type table: str
:return: Returns a list of columns that are of number type such as
integer, decimal, double etc.
:rtype: list
"""
return columns_by_type(table, _pg_numeric_col_types)
def numeric_varchar_columns(table, exclude_fk_columns=True):
#Combines numeric and text column types mostly used for display columns
num_char_types = _pg_numeric_col_types + _text_col_types
num_char_cols = columns_by_type(table, num_char_types)
if exclude_fk_columns:
fk_refs = foreign_key_parent_tables(table)
for fk in fk_refs:
local_col = fk[0]
col_idx = getIndex(num_char_cols, local_col)
if col_idx != -1:
num_char_cols.remove(local_col)
return num_char_cols
else:
return num_char_cols
def qgsgeometry_from_wkbelement(wkb_element):
"""
Convert a geoalchemy object in str or WKBElement format to the a
QgsGeometry object.
:return: QGIS Geometry object.
"""
if isinstance(wkb_element, WKBElement):
db_session = STDMDb.instance().session
geom_wkt = db_session.scalar(wkb_element.ST_AsText())
elif isinstance(wkb_element, str):
split_geom = wkb_element.split(";")
if len(split_geom) < 2:
return None
geom_wkt = split_geom[1]
return QgsGeometry.fromWkt(geom_wkt)
def _execute(sql,**kwargs):
"""
Execute the passed in sql statement
"""
conn = STDMDb.instance().engine.connect()
result = conn.execute(sql,**kwargs)
conn.close()
return result
def reset_content_roles():
rolesSet = "truncate table content_base cascade;"
_execute(text(rolesSet))
resetSql = text(rolesSet)
_execute(resetSql)
def delete_table_keys(table):
#clean_delete_table(table)
capabilities = ["Create", "Select", "Update", "Delete"]
for action in capabilities:
init_key = action +" "+ str(table).title()
sql = "DELETE FROM content_roles WHERE content_base_id IN" \
" (SELECT id FROM content_base WHERE name = '{0}');".format(init_key)
sql2 = "DELETE FROM content_base WHERE content_base.id IN" \
" (SELECT id FROM content_base WHERE name = '{0}');".format(init_key)
r = text(sql)
r2 = text(sql2)
_execute(r)
_execute(r2)
Base.metadata._remove_table(table, 'public')
def safely_delete_tables(tables):
for table in tables:
sql = "DROP TABLE if exists {0} CASCADE".format(table)
_execute(text(sql))
Base.metadata._remove_table(table, 'public')
flush_session_activity()
def flush_session_activity():
STDMDb.instance().session._autoflush()
def vector_layer(table_name, sql="", key="id", geom_column=""):
"""
Returns a QgsVectorLayer based on the specified table name.
"""
if not table_name:
return None
conn = stdm.data.app_dbconn
if conn is None:
return None
if not geom_column:
geom_column = None
ds_uri = conn.toQgsDataSourceUri()
ds_uri.setDataSource("public", table_name, geom_column, sql, key)
v_layer = QgsVectorLayer(ds_uri.uri(), table_name, "postgres")
return v_layer
def foreign_key_parent_tables(table_name):
"""
Functions that searches for foreign key references in the specified table.
:param table_name: Name of the database table.
:type table_name: str
:return: A list of tuples containing the local column name, foreign table
name and corresponding foreign column name.
:rtype: list
"""
#Check if the view for listing foreign key references exists
fk_ref_view = pg_table_exists("foreign_key_references")
#Create if it does not exist
if not fk_ref_view:
script_path = PLUGIN_DIR + "/scripts/foreign_key_references.sql"
script_file = QFile(script_path)
if script_file.exists():
if not script_file.open(QIODevice.ReadOnly):
return None
reader = QTextStream(script_file)
sql = reader.readAll()
if sql:
t = text(sql)
_execute(t)
else:
return None
#Fetch foreign key references
sql = "SELECT column_name,foreign_table_name,foreign_column_name FROM " \
"foreign_key_references where table_name=:tb_name"
t = text(sql)
result = _execute(t, tb_name=table_name)
fk_refs = []
for r in result:
fk_ref = r["column_name"], r["foreign_table_name"],\
r["foreign_column_name"]
fk_refs.append(fk_ref)
return fk_refs
| gpl-2.0 |
auduny/home-assistant | tests/helpers/test_intent.py | 8 | 1499 | """Tests for the intent helpers."""
import unittest
import voluptuous as vol
from homeassistant.core import State
from homeassistant.helpers import (intent, config_validation as cv)
import pytest
class MockIntentHandler(intent.IntentHandler):
"""Provide a mock intent handler."""
def __init__(self, slot_schema):
"""Initialize the mock handler."""
self.slot_schema = slot_schema
def test_async_match_state():
"""Test async_match_state helper."""
state1 = State('light.kitchen', 'on')
state2 = State('switch.kitchen', 'on')
state = intent.async_match_state(None, 'kitch', [state1, state2])
assert state is state1
class TestIntentHandler(unittest.TestCase):
"""Test the Home Assistant event helpers."""
def test_async_validate_slots(self):
"""Test async_validate_slots of IntentHandler."""
handler1 = MockIntentHandler({
vol.Required('name'): cv.string,
})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({'name': 1})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({'name': 'kitchen'})
handler1.async_validate_slots({'name': {'value': 'kitchen'}})
handler1.async_validate_slots({
'name': {'value': 'kitchen'},
'probability': {'value': '0.5'}
})
| apache-2.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/django/core/servers/basehttp.py | 62 | 6111 | """
HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21).
Based on wsgiref.simple_server which is part of the standard library since 2.5.
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE!
"""
from __future__ import unicode_literals
from io import BytesIO
import socket
import sys
import traceback
from wsgiref import simple_server
from wsgiref.util import FileWrapper # for backwards compatibility
from django.core.management.color import color_style
from django.core.wsgi import get_wsgi_application
from django.utils.module_loading import import_by_path
from django.utils import six
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.six.moves import socketserver
__all__ = ('WSGIServer', 'WSGIRequestHandler', 'MAX_SOCKET_CHUNK_SIZE')
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found on Django's Trac:
# https://code.djangoproject.com/ticket/5596#comment:4
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
def get_internal_wsgi_application():
"""
Loads and returns the WSGI application as configured by the user in
``settings.WSGI_APPLICATION``. With the default ``startproject`` layout,
this will be the ``application`` object in ``projectname/wsgi.py``.
This function, and the ``WSGI_APPLICATION`` setting itself, are only useful
for Django's internal servers (runserver, runfcgi); external WSGI servers
should just be configured to point to the correct application object
directly.
If settings.WSGI_APPLICATION is not set (is ``None``), we just return
whatever ``django.core.wsgi.get_wsgi_application`` returns.
"""
from django.conf import settings
app_path = getattr(settings, 'WSGI_APPLICATION')
if app_path is None:
return get_wsgi_application()
return import_by_path(
app_path,
error_prefix="WSGI application '%s' could not be loaded; " % app_path
)
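# Settings sketch for the lookup above (the project name is illustrative):
#
#   # settings.py
#   WSGI_APPLICATION = 'projectname.wsgi.application'
#
# With that value import_by_path() returns the module-level "application" object
# from projectname/wsgi.py; leaving the setting as None falls back to
# get_wsgi_application().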
class ServerHandler(simple_server.ServerHandler, object):
error_status = str("500 INTERNAL SERVER ERROR")
def write(self, data):
"""'write()' callable as specified by PEP 3333"""
assert isinstance(data, bytes), "write() argument must be bytestring"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
data = BytesIO(data)
for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
self._write(chunk)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
# This can be removed when support for Python <= 2.7.3 is deprecated.
def finish_response(self):
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
class WSGIServer(simple_server.WSGIServer, object):
"""BaseHTTPServer that implements the Python WSGI protocol"""
request_queue_size = 10
def __init__(self, *args, **kwargs):
if kwargs.pop('ipv6', False):
self.address_family = socket.AF_INET6
super(WSGIServer, self).__init__(*args, **kwargs)
def server_bind(self):
"""Override server_bind to store the server name."""
super(WSGIServer, self).server_bind()
self.setup_environ()
class WSGIRequestHandler(simple_server.WSGIRequestHandler, object):
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_static_prefix = urljoin(settings.STATIC_URL, 'admin/')
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
self.style = color_style()
super(WSGIRequestHandler, self).__init__(*args, **kwargs)
def address_string(self):
# Short-circuit parent method to not call socket.getfqdn
return self.client_address[0]
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if (self.path.startswith(self.admin_static_prefix)
or self.path == '/favicon.ico'):
return
msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
# Utilize terminal colors, if available
if args[1][0] == '2':
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif args[1][0] == '1':
msg = self.style.HTTP_INFO(msg)
elif args[1] == '304':
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif args[1][0] == '3':
msg = self.style.HTTP_REDIRECT(msg)
elif args[1] == '404':
msg = self.style.HTTP_NOT_FOUND(msg)
elif args[1][0] == '4':
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = self.style.HTTP_SERVER_ERROR(msg)
sys.stderr.write(msg)
def run(addr, port, wsgi_handler, ipv6=False, threading=False):
server_address = (addr, port)
if threading:
httpd_cls = type(str('WSGIServer'), (socketserver.ThreadingMixIn, WSGIServer), {})
else:
httpd_cls = WSGIServer
httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
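# Usage sketch (address and port are illustrative; runserver normally drives this):
#
#   # handler = get_internal_wsgi_application()
#   # run('127.0.0.1', 8000, handler, ipv6=False, threading=True)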
| mit |
fabiking/plugin.video.Mfabiking | resources/tools/resolvers.py | 1 | 77059 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# PalcoTV ([email protected])
# Multimedia connectors (stream resolvers) for PalcoTV
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the pelisalacarta libraries by Jesús (www.mimediacenter.info)
#------------------------------------------------------------
import os
import sys
import urllib
import urllib2
import re
import string
import shutil
import zipfile
import time
import urlparse
import random
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import scrapertools, plugintools, unwise, unpackerjs, requests, jsunpack, base64, json
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
from __main__ import *
art = addonPath + "/art/"
def allmyvideos(params):
plugintools.log('[%s %s] Allmyvideos %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
url_fixed = page_url.split("/")
url_fixed = 'http://www.allmyvideos.net/' + 'embed-' + url_fixed[3] + '.html'
plugintools.log("url_fixed= "+url_fixed)
# Read the page HTML
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(page_url, headers=headers)
data = r.text
if "<b>File Not Found</b>" in data or "<b>Archivo no encontrado</b>" in data or '<b class="err">Deleted' in data or '<b class="err">Removed' in data or '<font class="err">No such' in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
# Normalize the URL
videoid = page_url.replace("http://allmyvideos.net/","").replace("https://allmyvideos.net/","").strip()
page_url = "http://allmyvideos.net/embed-"+videoid+"-728x400.html"
#data = scrapertools.cache_page(page_url)
r = requests.get(page_url, headers=headers)
data = r.text
if "File was banned" in data:
#data = scrapertools.cache_page(page_url,post="op=download1&usr_login=&id="+videoid+"&fname=&referer=&method_free=1&x=147&y=25")
payload = {'op': 'download1', 'usr_login': '', 'id': videoid, 'fname': '', 'referer': '', 'method_free': '1', 'x': '147', 'y': '25'}
r = requests.get(page_url, params=payload)
data = r.text
# Extract the URL
match = re.compile('"file" : "(.+?)",').findall(data)
media_url = ""
if len(match) > 0:
for tempurl in match:
if not tempurl.endswith(".png") and not tempurl.endswith(".srt"):
media_url = tempurl
print media_url
if media_url == "":
media_url = match[0]
print media_url
if media_url!="":
media_url+= "&direct=false"
plugintools.log("media_url= "+media_url)
plugintools.play_resolved_url(media_url)
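# URL normalisation sketch for the resolver above (the video id is made up):
#
#   http://allmyvideos.net/abc123                          # params["url"] as received
#   -> http://allmyvideos.net/embed-abc123-728x400.html    # embed page actually scraped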
def streamcloud(params):
plugintools.log('[%s %s]Streamcloud %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("data= "+body)
# Progress bar while waiting (the loop below sleeps for 13 seconds in 10% steps)
progreso = xbmcgui.DialogProgress()
progreso.create("PalcoTV", "Abriendo Streamcloud..." , url )
i = 13000
j = 0
percent = 0
while j <= 13000 :
percent = ((j + ( 13000 / 10.0 )) / i)*100
xbmc.sleep(i/10) # 10% = 1.3 seconds
j = j + ( 13000 / 10.0 )
msg = "Espera unos segundos, por favor... "
percent = int(round(percent))
print percent
progreso.update(percent, "" , msg, "")
progreso.close()
media_url = plugintools.find_single_match(body , 'file\: "([^"]+)"')
if media_url == "":
op = plugintools.find_single_match(body,'<input type="hidden" name="op" value="([^"]+)"')
usr_login = ""
id = plugintools.find_single_match(body,'<input type="hidden" name="id" value="([^"]+)"')
fname = plugintools.find_single_match(body,'<input type="hidden" name="fname" value="([^"]+)"')
referer = plugintools.find_single_match(body,'<input type="hidden" name="referer" value="([^"]*)"')
hashstring = plugintools.find_single_match(body,'<input type="hidden" name="hash" value="([^"]*)"')
imhuman = plugintools.find_single_match(body,'<input type="submit" name="imhuman".*?value="([^"]+)">').replace(" ","+")
post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
request_headers.append(["Referer",url])
body,response_headers = plugintools.read_body_and_headers(url, post=post, headers=request_headers)
plugintools.log("data= "+body)
# Extract the URL
media_url = plugintools.find_single_match( body , 'file\: "([^"]+)"' )
plugintools.log("media_url= "+media_url)
plugintools.play_resolved_url(media_url)
if 'id="justanotice"' in body:
plugintools.log("[streamcloud.py] data="+body)
plugintools.log("[streamcloud.py] Ha saltado el detector de adblock")
return -1
def playedto(params):
plugintools.log('[%s %s] Played.to %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
url = url.split("/")
url_fixed = "http://played.to/embed-" + url[3] + "-640x360.html"
plugintools.log("url_fixed= "+url_fixed)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
body,response_headers = plugintools.read_body_and_headers(url_fixed, headers=request_headers)
body = body.strip()
if body == "<center>This video has been deleted. We apologize for the inconvenience.</center>":
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Enlace borrado...", 3 , art+'icon.png'))
elif body.find("Removed for copyright infringement") >= 0:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Removed for copyright infringement", 3 , art+'icon.png'))
else:
r = re.findall('file(.+?)\n', body)
for entry in r:
entry = entry.replace('",', "")
entry = entry.replace('"', "")
entry = entry.replace(': ', "")
entry = entry.strip()
plugintools.log("vamos= "+entry)
if entry.endswith("flv"):
plugintools.play_resolved_url(entry)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Resolviendo enlace...", 3 , art+'icon.png'))
params["url"]=entry
plugintools.log("URL= "+entry)
def vidspot(params):
plugintools.log('[%s %s] Vidspot %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
url = url.split("/")
url_fixed = 'http://www.vidspot.net/' + 'embed-' + url[3] + '.html'
plugintools.log("url_fixed= "+url_fixed)
# Read the page HTML
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(url_fixed, headers=headers)
body = r.text
try:
if body.find("File was deleted") >= 0:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado", 3 , art+'icon.png'))
else:
r = re.findall('"file" : "(.+?)"', body)
for entry in r:
plugintools.log("vamos= "+entry)
if entry.endswith("mp4?v2"):
url = entry + '&direct=false'
params["url"]=url
plugintools.log("url= "+url)
plugintools.play_resolved_url(url)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Resolviendo enlace...", 3 , art+'icon.png'))
except:
pass
def vk(params):
plugintools.log('[%s %s] Vk %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
#url = url.replace('http://', 'https://')
#data = data.replace("amp;", "")
#data = scrapertools.cache_page(page_url)
data = plugintools.read(page_url)
plugintools.log("data= "+data)
if "This video has been removed from public access" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
#data = scrapertools.cache_page(page_url.replace("amp;",""))
data = plugintools.read(page_url.replace("amp;",""))
plugintools.log("data= "+data)
videourl = ""
match = plugintools.find_single_match(data, r'vkid=([^\&]+)\&')
print match
vkid = ""
# Read the page and extract the video ID
data2 = data.replace("\\","")
patron = '"vkid":"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data2)
if len(matches)>0:
vkid = matches[0]
else:
plugintools.log("No encontró vkid")
plugintools.log("vkid="+vkid)
# Extract the video parameters and add the available qualities to the list
patron = "var video_host = '([^']+)'.*?"
patron += "var video_uid = '([^']+)'.*?"
patron += "var video_vtag = '([^']+)'.*?"
patron += "var video_no_flv = ([^;]+);.*?"
patron += "var video_max_hd = '([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(data)
print matches
if len(matches)>0:
#01:44:52 T:2957156352 NOTICE: video_host=http://cs509601.vk.com/, video_uid=149623387, video_vtag=1108941f4c, video_no_flv=1, video_max_hd=1
video_host = matches[0][0]
video_uid = matches[0][1]
video_vtag = matches[0][2]
video_no_flv = matches[0][3]
video_max_hd = matches[0][4]
else:
#{"uid":"97482389","vid":"161509127\",\"oid\":\"97482389\","host":"507214",\"vtag\":\"99bca9d028\",\"ltag\":\"l_26f55018\",\"vkid\":\"161509127\",\"md_title\":\"El Libro de La Selva - 1967 - tetelx - spanish\",\"md_author\":\"Tetelx Tete\",\"hd\":1,\"no_flv\":1,\"hd_def\":-1,\"dbg_on\":0,\"t\":\"\",\"thumb\":\"http:\\\/\\\/cs507214.vkontakte.ru\\\/u97482389\\\/video\\\/l_26f55018.jpg\",\"hash\":\"3a576695e9f0bfe3093eb21239bd322f\",\"hash2\":\"be750b8971933dd6\",\"is_vk\":\"1\",\"is_ext\":\"0\",\"lang_add\":\"Add to My Videos\",\"lang_share\":\"Share\",\"lang_like\":\"Like\",\"lang_volume_on\":\"Unmute\",\"lang_volume_off\":\"Mute\",\"lang_volume\":\"Volume\",\"lang_hdsd\":\"Change Video Quality\",\"lang_fullscreen\":\"Full Screen\",\"lang_window\":\"Minimize\",\"lang_rotate\":\"Rotate\",\"video_play_hd\":\"Watch in HD\",\"video_stop_loading\":\"Stop Download\",\"video_player_version\":\"VK Video Player\",\"video_player_author\":\"Author - Alexey Kharkov\",\"goto_orig_video\":\"Go to Video\",\"video_get_video_code\":\"Copy vdeo code\",\"video_load_error\":\"The video has not uploaded yet or the server is not available\",\"video_get_current_url\":\"Copy frame link\",\"nologo\":1,\"liked\":0,\"add_hash\":\"67cd39a080ad6e0ad7\",\"added\":1,\"use_p2p\":0,\"p2p_group_id\":\"fb2d8cfdcbea4f3c\"}
#01:46:05 T:2955558912 NOTICE: video_host=507214, video_uid=97482389, video_vtag=99bca9d028, video_no_flv=1, video_max_hd=1
data2 = data.replace("\\","")
video_host = scrapertools.get_match(data2,'"host":"([^"]+)"')
video_uid = scrapertools.get_match(data2,'"uid":"([^"]+)"')
video_vtag = scrapertools.get_match(data2,'"vtag":"([^"]+)"')
video_no_flv = scrapertools.get_match(data2,'"no_flv":([0-9]+)')
video_max_hd = scrapertools.get_match(data2,'"hd":([0-9]+)')
if not video_host.startswith("http://"):
video_host = "http://cs"+video_host+".vk.com/"
plugintools.log("video_host="+video_host+", video_uid="+video_uid+", video_vtag="+video_vtag+", video_no_flv="+video_no_flv+", video_max_hd="+video_max_hd)
video_urls = []
if video_no_flv.strip() == "0" and video_uid != "0":
tipo = "flv"
if "http://" in video_host:
videourl = "%s/u%s/video/%s.%s" % (video_host,video_uid,video_vtag,tipo)
else:
videourl = "http://%s/u%s/video/%s.%s" % (video_host,video_uid,video_vtag,tipo)
# Add it to the list
video_urls.append( ["FLV [vk]",videourl])
elif video_uid== "0" and vkid != "": #http://447.gt3.vkadre.ru/assets/videos/2638f17ddd39-75081019.vk.flv
tipo = "flv"
if "http://" in video_host:
videourl = "%s/assets/videos/%s%s.vk.%s" % (video_host,video_vtag,vkid,tipo)
else:
videourl = "http://%s/assets/videos/%s%s.vk.%s" % (video_host,video_vtag,vkid,tipo)
# Add it to the list
video_urls.append( ["FLV [vk]",videourl])
else: #http://cs12385.vkontakte.ru/u88260894/video/d09802a95b.360.mp4
# The highest-quality stream found will be played
if video_max_hd=="3":
plugintools.log("Vamos a por el vídeo 720p")
if video_host.endswith("/"):
videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"720.mp4")
else:
videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"720.mp4")
plugintools.log("videourl= "+videourl)
elif video_max_hd=="2":
plugintools.log("Vamos a por el vídeo 480p")
if video_host.endswith("/"):
videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"480.mp4")
else:
videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"480.mp4")
plugintools.log("videourl= "+videourl)
elif video_max_hd=="1":
plugintools.log("Vamos a por el vídeo 360p")
if video_host.endswith("/"):
videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"360.mp4")
else:
videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"360.mp4")
plugintools.log("videourl= "+videourl)
plugintools.play_resolved_url(videourl)
plugintools.log("videourl= "+videourl)
def nowvideo(params):
plugintools.log('[%s %s] Nowvideo %s' % (addonName, addonVersion, repr(params)))
data = plugintools.read(params.get("url"))
#data = data.replace("amp;", "")
if "The file is being converted" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "El archivo está en proceso", 3 , art+'icon.png'))
elif "no longer exists" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "El archivo ha sido borrado", 3 , art+'icon.png'))
else:
#plugintools.log("data= "+data)
domain = plugintools.find_single_match(data, 'flashvars.domain="([^"]+)')
video_id = plugintools.find_single_match(data, 'flashvars.file="([^"]+)')
filekey = plugintools.find_single_match(data, 'flashvars.filekey=([^;]+)')
# The page exposes the token like this (fkzd being the filekey): var fkzd="83.47.1.12-8d68210314d70fb6506817762b0d495e";
token_txt = 'var '+filekey
#plugintools.log("token_txt= "+token_txt)
token = plugintools.find_single_match(data, filekey+'=\"([^"]+)')
token = token.replace(".","%2E").replace("-","%2D")
#plugintools.log("domain= "+domain)
#plugintools.log("video_id= "+video_id)
#plugintools.log("filekey= "+filekey)
#plugintools.log("token= "+token)
if video_id == "":
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Error!", 3 , art+'icon.png'))
else:
#http://www.nowvideo.sx/api/player.api.php?user=undefined&pass=undefined&cid3=undefined&numOfErrors=0&cid2=undefined&key=83%2E47%2E1%2E12%2D8d68210314d70fb6506817762b0d495e&file=b5c8c44fc706f&cid=1
url = 'http://www.nowvideo.sx/api/player.api.php?user=undefined&pass=undefined&cid3=undefined&numOfErrors=0&cid2=undefined&key=' + token + '&file=' + video_id + '&cid=1'
# Launch an HTTP request to that URL
referer = 'http://www.nowvideo.sx/video/b5c8c44fc706f'
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",referer])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
# plugintools.log("data= "+body)
# body= url=http://s173.coolcdn.ch/dl/04318aa973a3320b8ced6734f0c20da3/5440513e/ffe369cb0656c0b8de31f6ef353bcff192.flv&title=The.Black.Rider.Revelation.Road.2014.DVDRip.X264.AC3PLAYNOW.mkv%26asdasdas&site_url=http://www.nowvideo.sx/video/b5c8c44fc706f&seekparm=&enablelimit=0
body = body.replace("url=", "")
body = body.split("&")
if len(body) >= 0:
print 'body',body
url = body[0]
plugintools.play_resolved_url(url)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Cargando vídeo...", 1 , art+'icon.png'))
else:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Error!", 3 , art+'icon.png'))
''' In the browser...
flashvars.domain="http://www.nowvideo.sx";
flashvars.file="b5c8c44fc706f";
flashvars.filekey=fkzd;
flashvars.advURL="0";
flashvars.autoplay="false";
flashvars.cid="1";
'''
def tumi(params):
plugintools.log('[%s %s] Tumi %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
data = scrapertools.cache_page(page_url)
if "Video is processing now" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "El archivo está en proceso", 3 , art+'icon.png'))
else:
try:
x = scrapertools.find_single_match(data, "\|type\|(.*?)\|file\|").replace("||","|").split("|")
n = scrapertools.find_single_match(data, "//k.j.h.([0-9]+):g/p/v.o")
printf = "http://%s.%s.%s.%s:%s/%s/%s.%s"
if n:
url = printf % (x[3], x[2], x[1], n, x[0], x[8], "v", x[7])
else:
url = printf % (x[4], x[3], x[2], x[1], x[0], x[9], "v", x[8])
except:
url = scrapertools.find_single_match(data, "file:'([^']+)'")
plugintools.log("url_final= "+url)
plugintools.play_resolved_url(url)
def veehd(params):
plugintools.log('[%s %s] VeeHD %s' % (addonName, addonVersion, repr(params)))
uname = plugintools.get_setting("veehd_user")
pword = plugintools.get_setting("veehd_pword")
if uname == '' or pword == '':
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Debes configurar el identificador para Veehd.com", 3 , art+'icon.png'))
return
url = params.get("url")
url_login = 'http://veehd.com/login'
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",url])
post = {'ref': url, 'uname': uname, 'pword': pword, 'submit': 'Login', 'terms': 'on'}
post = urllib.urlencode(post)
body,response_headers = plugintools.read_body_and_headers(url_login, post=post, headers=request_headers, follow_redirects=True)
vpi = plugintools.find_single_match(body, '"/(vpi.+?h=.+?)"')
if not vpi:
if 'type="submit" value="Login" name="submit"' in body:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Error al identificarse en Veehd.com", 3 , art+'icon.png'))
else:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Error buscando el video en Veehd.com", 3 , art+'icon.png'))
return
req = urllib2.Request('http://veehd.com/'+vpi)
for header in request_headers:
req.add_header(header[0], header[1]) # User-Agent
response = urllib2.urlopen(req)
body = response.read()
response.close()
va = plugintools.find_single_match(body, '"/(va/.+?)"')
if va:
req = urllib2.Request('http://veehd.com/'+va)
for header in request_headers:
req.add_header(header[0], header[1]) # User-Agent
urllib2.urlopen(req)
req = urllib2.Request('http://veehd.com/'+vpi)
for header in request_headers:
req.add_header(header[0], header[1]) # User-Agent
response = urllib2.urlopen(req)
body = response.read()
response.close()
video_url = False
if 'application/x-shockwave-flash' in body:
video_url = urllib.unquote(plugintools.find_single_match(body, '"url":"(.+?)"'))
elif 'video/divx' in body:
video_url = urllib.unquote(plugintools.find_single_match(body, 'type="video/divx"\s+src="(.+?)"'))
if not video_url:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Error abriendo el video en Veehd.com", 3 , art+'icon.png'))
return
plugintools.log("video_url= "+video_url)
plugintools.play_resolved_url(video_url)
def turbovideos(params):
url=params['url']
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://turbovideos.net/embed-%s.html' % url
#result = client.request(url)
result = requests.get(url).content
url = re.compile('file *: *"(.+?)"').findall(result)
if len(url) > 0: plugintools.play_resolved_url(url[0])
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = unpack(result)
#url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = url[0]
plugintools.play_resolved_url(url)
except:
return
#------------------------------------------------------------
# UNPACK created by Quequino (thanks, friend!)
#------------------------------------------------------------
def unpack(sJavascript,iteration=1, totaliterations=1 ):
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=(\'"+aSplit[1].split(".spli")[0]+')';exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
e = '';d = ''
sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
if iteration>=totaliterations: return sUnpacked1
else: return unpack(sUnpacked1,iteration+1)
def __unpack(p, a, c, k, e, d, iteration,v=1):
while (c >= 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
p=re.sub('\\b' + aa +'\\b', k[c], p)
return p
def __itoa(num, radix):
result = ""
if num==0: return '0'
while num > 0: result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result;num /= radix
return result
def __itoaNew(cc, a):
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb
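# --- Added illustrative sketch (not part of the original addon) ---------------
# Minimal example of the kind of string unpack() is meant to reverse: an
# eval(function(p,a,c,k,e,d){...}) wrapper in the p.a.c.k.e.r style. The packed
# sample below is synthetic; real hosters hide a video URL the same way.
def _unpack_example():
    packed = ("eval(function(p,a,c,k,e,d){while(c--)if(k[c])"
              "p=p.replace(new RegExp('\\\\b'+c.toString(36)+'\\\\b','g'),k[c]);"
              "return p}('0 1=\"2\";',3,3,'var|file|http://example.com/v.mp4'.split('|'),0,{}))")
    # unpack() only parses the payload after "return p}('", so the JS body above
    # never has to be executed.
    return unpack(packed)  # -> 'var file="http://example.com/v.mp4";'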
#------------------------------------------------------------
#------------------------------------------------------------
def streaminto(params):
plugintools.log('[%s %s] streaminto %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if page_url.startswith("http://streamin.to/embed-") == False:
videoid = plugintools.find_single_match(page_url,"streamin.to/([a-z0-9A-Z]+)")
page_url = "http://streamin.to/embed-"+videoid+".html"
plugintools.log("page_url= "+page_url)
# Fetch the page source
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(page_url, headers=headers)
data = r.text
plugintools.log("data= "+data)
if data == "File was deleted":
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
# TODO: if "video not found" appears in data, show the "Archivo borrado!" notification
patron_flv = 'file: "([^"]+)"'
patron_jpg = 'image: "(http://[^/]+/)'
try:
host = scrapertools.get_match(data, patron_jpg)
plugintools.log("[streaminto.py] host="+host)
flv_url = scrapertools.get_match(data, patron_flv)
plugintools.log("[streaminto.py] flv_url="+flv_url)
flv = host+flv_url.split("=")[1]+"/v.flv"
plugintools.log("[streaminto.py] flv="+flv)
page_url = flv
except:
plugintools.log("[streaminto] opcion 2")
op = plugintools.find_single_match(data,'<input type="hidden" name="op" value="([^"]+)"')
plugintools.log("[streaminto] op="+op)
usr_login = ""
id = plugintools.find_single_match(data,'<input type="hidden" name="id" value="([^"]+)"')
plugintools.log("[streaminto] id="+id)
fname = plugintools.find_single_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
plugintools.log("[streaminto] fname="+fname)
referer = plugintools.find_single_match(data,'<input type="hidden" name="referer" value="([^"]*)"')
plugintools.log("[streaminto] referer="+referer)
hashstring = plugintools.find_single_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
plugintools.log("[streaminto] hashstring="+hashstring)
imhuman = plugintools.find_single_match(data,'<input type="submit" name="imhuman".*?value="([^"]+)"').replace(" ","+")
plugintools.log("[streaminto] imhuman="+imhuman)
import time
time.sleep(10)
# Request the page a second time, as if the banner had been clicked
#op=download1&usr_login=&id=z3nnqbspjyne&fname=Coriolanus_DVDrip_Castellano_by_ARKONADA.avi&referer=&hash=nmnt74bh4dihf4zzkxfmw3ztykyfxb24&imhuman=Continue+to+Video
post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
request_headers.append(["Referer",page_url])
data_video = plugintools.read_body_and_headers( page_url , post=post, headers=request_headers )
data_video = data_video[0]
rtmp = plugintools.find_single_match(data_video, 'streamer: "([^"]+)"')
print 'rtmp',rtmp
video_id = plugintools.find_single_match(data_video, 'file: "([^"]+)"')
print 'video_id',video_id
swf = plugintools.find_single_match(data_video, 'src: "(.*?)"')
print 'swf',swf
page_url = rtmp+' swfUrl='+swf + ' playpath='+video_id+"/v.flv"
plugintools.play_resolved_url(page_url)
def powvideo(params):
plugintools.log('[%s %s] Powvideo %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if not "embed" in page_url:
page_url = page_url.replace("http://powvideo.net/","http://powvideo.net/embed-") + "-640x360.html"
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0', 'Referer': page_url, "Accept-Encoding": "gzip, deflate, sdch" }
page_url= page_url.replace("embed","iframe")
r = requests.get(page_url, headers=headers)
data = r.text
data = plugintools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
data = jsunpack.unpack(data)
data = data.replace("\\","")
media_url = plugintools.find_single_match(data,'\{.*(http.*mp4)').strip()
print media_url
plugintools.play_resolved_url(media_url)
def mailru(params):
plugintools.log('[%s %s] Mail.ru %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
url = url.replace('/my.mail.ru/video/', '/api.video.mail.ru/videos/embed/')
url = url.replace('/videoapi.my.mail.ru/', '/api.video.mail.ru/')
plugintools.log("URL = "+url)
result = getUrl(url).result
plugintools.log("result= "+result)
url = re.compile('metadataUrl":"(.+?)"').findall(result)[0]
cookie = getUrl(url, output='cookie').result
h = "|Cookie=%s" % urllib.quote(cookie)
result = getUrl(url).result
plugintools.log("result= "+result)
result = json.loads(result)
result = result['videos']
url = []
url += [{'quality': '1080p', 'url': i['url'] + h} for i in result if i['key'] == '1080p']
url += [{'quality': 'HD', 'url': i['url'] + h} for i in result if i['key'] == '720p']
url += [{'quality': 'SD', 'url': i['url'] + h} for i in result if not (i['key'] == '1080p' or i ['key'] == '720p')]
#if url == []: return
plugintools.play_resolved_url(url)
def mediafire(params):
plugintools.log('[%s %s] Mediafire %s' % (addonName, addonVersion, repr(params)))
# Request the web page
url = params.get("url")
data = plugintools.read(url)
# Wait one second and reload
plugintools.log("[PalcoTV] Espere un segundo...")
import time
time.sleep(1)
data = plugintools.read(url)
plugintools.log("data= "+data)
pattern = 'kNO \= "([^"]+)"'
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
plugintools.log("entry= "+entry)
# Type 1 - http://www.mediafire.com/download.php?4ddm5ddriajn2yo
pattern = 'mediafire.com/download.php\?([a-z0-9]+)'
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
if entry != "":
url = 'http://www.mediafire.com/?'+entry
plugintools.log("URL Tipo 1 = "+url)
'''
# Tipo 2 - http://www.mediafire.com/?4ckgjozbfid
pattern = 'http://www.mediafire.com/\?([a-z0-9]+)'
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
if entry != "":
url = 'http://www.mediafire.com/?'+entry
plugintools.log("URL Tipo 2 = "+url)
# Tipo 3 - http://www.mediafire.com/file/c0ama0jzxk6pbjl
pattern = 'http://www.mediafire.com/file/([a-z0-9]+)'
plugintools.log("[mediafire.py] find_videos #"+pattern+"#")
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
if entry != "":
url = 'http://www.mediafire.com/?'+entry
plugintools.log("URL Tipo 3 = "+url)
'''
def novamov(params):
plugintools.log('[%s %s] Novamov %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
media_id = page_url.replace("http://www.novamov.com/video/", "").strip()
# Check that the video exists
data = scrapertools.cache_page(page_url)
if "This file no longer exists on our servers" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "No existe vídeo en Novamov", 3 , art+'icon.png'))
elif "is being converted" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Vídeo no disponible", 3 , art+'icon.png'))
plugintools.log("[novamov.py] get_video_url(page_url='%s')" % page_url)
html = scrapertools.cache_page(page_url)
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "flashvars.filekey")
#get stream url from api
api = 'http://www.novamov.com/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
data = scrapertools.cache_page(api)
data = data.replace("url=", "").strip()
data = data.split("&title=")
url_final = data[0]+'?client=FLASH'
# http://s91.coolcdn.ch/dl/dfdb3d051c3e71db62cf8379259ffcbd/552254ab/ff2e9e3dc0489c213e868d43e74bd1b356.flv?client=FLASH
# http://s181.coolcdn.ch/dl/003aa7721702b4db5598faf880d76386/55225401/fffadbdfcba93c7515995141bcf8b1a95a.flv&title=The.Walking.Dead.S05E13.Vose%26asdasdas&site_http://www.novamov.com/video/f664cf727c58c&seekparm=&enablelimit=0]
plugintools.log("url_final= "+url_final)
plugintools.play_resolved_url(url_final)
def gamovideo(params):
plugintools.log('[%s %s] Gamovideo %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if not "embed" in page_url:
page_url = page_url.replace("http://gamovideo.com/","http://gamovideo.com/embed-") + "-640x360.html"
headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
data = scrapertools.cache_page( page_url , headers=headers )
if "is no longer available" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
data = scrapertools.cache_page( page_url , headers=headers )
try:
'''
<input type="hidden" name="op" value="download1">
<input type="hidden" name="usr_login" value="">
<input type="hidden" name="id" value="auoxxtvyquoy">
<input type="hidden" name="fname" value="Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi">
<input type="hidden" name="referer" value="">
<input type="hidden" name="hash" value="1624-83-46-1377796069-b5e6b8f9759d080a3667adad637f00ac">
<input type="submit" name="imhuman" value="Continue to Video" id="btn_download">
'''
op = scrapertools.get_match(data,'<input type="hidden" name="op" value="(down[^"]+)"')
usr_login = ""
id = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)"')
fname = scrapertools.get_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
referer = scrapertools.get_match(data,'<input type="hidden" name="referer"\s+value="([^"]*)"')
hashvalue = scrapertools.get_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
submitbutton = scrapertools.get_match(data,'<input type="submit" name="imhuman" value="([^"]+)"').replace(" ","+")
import time
time.sleep(5)
# Request the page a second time, as if the banner had been clicked
#op=download1&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796019-c2b422f91da55d12737567a14ea3dffe&imhuman=Continue+to+Video
#op=search&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796398-8020e5629f50ff2d7b7de99b55bdb177&imhuman=Continue+to+Video
post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashvalue+"&imhuman="+submitbutton
headers.append(["Referer",page_url])
data = scrapertools.cache_page( page_url , post=post, headers=headers )
plugintools.log("data="+data)
except:
import traceback
traceback.print_exc()
# Extract the URL
plugintools.log("data="+data)
data = scrapertools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
plugintools.log("data="+data)
data = unpackerjs.unpackjs(data)
plugintools.log("data="+data)
# ('jwplayer("vplayer").setup({playlist:[{image:"http://192.99.35.229:8777/i/01/00048/ibw5pte06up4.jpg",sources:[{file:"rtmp://192.99.35.229:1935/vod?h=7ax23yxze4pskjwff5zcce7uyyqvxf5ullx3urse54oyq2tepqiko5s6xsoq/mp4:35/3779312894_n.mp4?h=7ax23yxze4pskjwff5zcce7uyyqvxf5ullx3urse54oyq2tepqiko5s6xsoq"},{file:"35/3779312894_n.mp4?h=7ax23yxze4pskjwff5zcce7uyyqvxf5ullx3urse54oyq2tepqiko5s6xsoq"}],tracks:[]}],rtmp:{bufferlength:5},height:528,primary:"flash",width:950,captions:{color:\'#FFFFFF\',fontSize:15,fontFamily:"Verdana"}});var vvplay;var tt243542=0;var p0243542=0;jwplayer().onTime(function(x){if(p0243542>0)tt243542+=x.position-p0243542;p0243542=x.position;if(0!=0&&tt243542>=0){p0243542=-1;jwplayer().stop();jwplayer().setFullscreen(false);$(\'#play_limit_box\').show();$(\'div.video_ad\').show()}});jwplayer().onSeek(function(x){p0243542=-1});jwplayer().onPlay(function(x){doPlay(x)});jwplayer().onComplete(function(){$(\'div.video_ad\').show()});function doPlay(x){$(\'div.video_ad\').hide();if(vvplay)return;vvplay=1;}',,355,
data = data.replace('file:"rtmp://', 'streamer:"')
pfile = plugintools.find_single_match(data,'file\s*\:\s*"([^"]+)"')
pstreamer = 'rtmp://'+plugintools.find_single_match(data,'streamer\s*\:\s*"([^"]+)"')
media_url = pstreamer + " playpath=" + pfile.replace("playpath=", "").strip()
plugintools.log("media_url= "+media_url)
plugintools.play_resolved_url(media_url)
def moevideos(params):
plugintools.log('[%s %s] Moevideos %s' % (addonName, addonVersion, repr(params)))
# Does not exist / deleted: http://www.moevideos.net/online/27991
page_url = params.get("url")
data = scrapertools.cache_page(page_url)
plugintools.log("data= "+data)
if "<span class='tabular'>No existe</span>" in data:
return False,"No existe o ha sido borrado de moevideos"
else:
# Exists: http://www.moevideos.net/online/18998
patron = "<span class='tabular'>([^>]+)</span>"
headers = []
headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
data = scrapertools.cache_page( page_url , headers=headers )
# Download the script (useless, except to obtain the cookies)
headers.append(['Referer',page_url])
post = "id=1&enviar2=ver+video"
data = scrapertools.cache_page( page_url , post=post, headers=headers )
### Modificado 12-6-2014
#code = scrapertools.get_match(data,'flashvars\="file\=([^"]+)"')
#<iframe width="860" height="440" src="http://moevideo.net/framevideo/16363.1856374b43bbd40c7f8d2b25b8e5?width=860&height=440" frameborder="0" allowfullscreen ></iframe>
code = scrapertools.get_match(data,'<iframe width="860" height="440" src="http://moevideo.net/framevideo/([^\?]+)\?width=860\&height=440" frameborder="0" allowfullscreen ></iframe>')
plugintools.log("code="+code)
# letitbit API
headers2 = []
headers2.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
### Modificado 12-6-2014
url = "http://api.letitbit.net"
#url = "http://api.moevideo.net"
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2272871%2E71f6541e64b0eda8da727a79424d%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2272871%2E71f6541e64b0eda8da727a79424d%22%7D%5D%5D"
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2212110%2E1424270cc192f8856e07d5ba179d%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2212110%2E1424270cc192f8856e07d5ba179d%22%7D%5D%5D
#post = "r=%5B%22tVL0gjqo5%22%2C%5B%22preview%2Fflv%5Fimage%22%2C%7B%22uid%22%3A%2268653%2E669cbb12a3b9ebee43ce14425d9e%22%7D%5D%2C%5B%22preview%2Fflv%5Flink%22%2C%7B%22uid%22%3A%2268653%2E669cbb12a3b9ebee43ce14425d9e%22%7D%5D%5D"
post = 'r=["tVL0gjqo5",["preview/flv_image",{"uid":"'+code+'"}],["preview/flv_link",{"uid":"'+code+'"}]]'
data = scrapertools.cache_page(url,headers=headers2,post=post)
plugintools.log("data="+data)
if ',"not_found"' in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
data = data.replace("\\","")
plugintools.log("data="+data)
patron = '"link"\:"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
video_url = matches[0]+"?ref=www.moevideos.net|User-Agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:15.0) Gecko/20100101 Firefox/15.0.1&Range=bytes:0-"
plugintools.log("[moevideos.py] video_url="+video_url)
video_urls = []
video_urls.append( [ scrapertools.get_filename_from_url(video_url)[-4:] + " [moevideos]",video_url ] )
plugintools.play_resolved_url(video_urls[0][1])
def movshare(params):
plugintools.log('[%s %s] Movshare %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
data = scrapertools.cache_page(page_url)
if "This file no longer exists on our servers" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo borrado!", 3 , art+'icon.png'))
else:
videoid = scrapertools.get_match(page_url,"http://www.movshare.net/video/([a-z0-9]+)")
video_urls = []
# Download the page
headers = []
headers.append( ['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'] )
data = scrapertools.cache_page(page_url , headers = headers)
# Download it again, as if the button had been clicked
#html = scrapertools.cache_page(page_url , headers = headers)
filekey = plugintools.find_single_match(data,'flashvars.filekey="([^"]+)"')
#get stream url from api
api = 'http://www.movshare.net/api/player.api.php?key=%s&file=%s' % (filekey, videoid)
headers.append( ['Referer',page_url] )
html = scrapertools.cache_page(api,headers=headers)
plugintools.log("html="+html)
stream_url = plugintools.find_single_match(html,'url=(.+?)&title')
if stream_url!="":
video_urls.append( [ scrapertools.get_filename_from_url(stream_url)[-4:]+" [movshare]" , stream_url ] )
for video_url in video_urls:
plugintools.log("[movshare.py] %s - %s" % (video_url[0],video_url[1]))
plugintools.log("url_final= "+video_url[1])
plugintools.play_resolved_url(video_url[1])
def movreel(params):
plugintools.log('[%s %s] Movreel %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
video_urls = []
data = scrapertools.cache_page(page_url)
op = plugintools.find_single_match(data,'<input type="hidden" name="op" value="([^"]+)">')
file_code = plugintools.find_single_match(data,'<input type="hidden" name="file_code" value="([^"]+)">')
w = plugintools.find_single_match(data,'<input type="hidden" name="w" value="([^"]+)">')
h = plugintools.find_single_match(data,'<input type="hidden" name="h" value="([^"]+)">')
method_free = plugintools.find_single_match(data,'<input type="submit" name="method_free" value="([^"]+)">')
#op=video_embed&file_code=yrwo5dotp1xy&w=600&h=400&method_free=Close+Ad+and+Watch+as+Free+User
#post = 'op=video_embed&file_code='+file_code+'+&w='+w+'&h='+h+'$method_free='+method_free
post = urllib.urlencode( {"op":op,"file_code":file_code,"w":w,"h":h,"method_free":method_free} )
print 'post',post
data = scrapertools.cache_page(page_url,post=post)
#plugintools.log("data="+data)
data = unpackerjs.unpackjs(data)
plugintools.log("data="+data)
media_url = plugintools.find_single_match(data,'file\:"([^"]+)"')
plugintools.play_resolved_url(media_url)
def videobam(params):
plugintools.log('[%s %s] Videobam %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
data = scrapertools.cache_page(page_url)
videourl = ""
match = ""
if "Video is processing" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Archivo no disponible temporalmente!", 3 , art+'icon.png'))
else:
patronHD = " high: '([^']+)'"
matches = re.compile(patronHD,re.DOTALL).findall(data)
for match in matches:
videourl = match
plugintools.log("Videobam HQ :"+match)
if videourl == "":
patronSD= " low: '([^']+)'"
matches = re.compile(patronSD,re.DOTALL).findall(data)
for match in matches:
videourl = match
plugintools.log("Videobam LQ :"+match)
if match == "":
if len(matches)==0:
# "scaling":"fit","url":"http:\/\/f10.videobam.com\/storage\/11\/videos\/a\/aa\/AaUsV\/encoded.mp4
patron = '[\W]scaling[\W]:[\W]fit[\W],[\W]url"\:"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for match in matches:
videourl = match.replace('\/','/')
videourl = urllib.unquote(videourl)
plugintools.log("Videobam scaling: "+videourl)
if videourl != "":
plugintools.play_resolved_url(videourl)
else:
plugintools.play_resolved_url(videourl)
def vimeo(params):
plugintools.log("servers.vimeo get_video_url(page_url='%s')" % repr(params))
page_url = params.get("url")
headers = []
headers.append( ['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'] )
data = scrapertools.cache_page(page_url, headers=headers)
'''
<div class="player" style="background-image: url(http://b.vimeocdn.com/ts/433/562/433562952_960.jpg);" id="player_1_53086fb0f413f" data-config-url="http://player.vimeo.com/v2/video/63073570/config?autoplay=0&byline=0&bypass_privacy=1&context=clip.main&default_to_hd=1&portrait=0&title=0&s=4268c7772994be693b480b75b5d84452f3e81f96" data-fallback-url="//player.vimeo.com/v2/video/63073570/fallback?js"
'''
url = scrapertools.find_single_match(data,'<div class="player" style="[^"]+" id="[^"]+" data-config-url="([^"]+)"')
url = url.replace("&amp;","&")  # unescape the HTML-encoded config URL
headers.append( ['Referer',page_url] )
data = scrapertools.cache_page(url, headers=headers)
json_object = jsontools.load_json(data)
'''
http://player.vimeo.com/v2/video/63073570/config?autoplay=0&byline=0&bypass_privacy=1&context=clip.main&default_to_hd=1&portrait=0&title=0&s=4268c7772994be693b480b75b5d84452f3e81f96
> GET /v2/video/63073570/config?autoplay=0&byline=0&bypass_privacy=1&context=clip.main&default_to_hd=1&portrait=0&title=0&s=4268c7772994be693b480b75b5d84452f3e81f96 HTTP/1.1
> User-Agent: curl/7.24.0 (x86_64-apple-darwin12.0) libcurl/7.24.0 OpenSSL/0.9.8y zlib/1.2.5
> Host: player.vimeo.com
> Accept: */*
>
< HTTP/1.1 200 OK
< Expires: Sun, 23 02 2014 09:39:32 GMT
< Vary: Origin, Accept-Encoding
< Etag: "009d88dc9b151e402faf10efb7ba4cabe0412385"
< P3p: CP="This is not a P3P policy! See http://vimeo.com/privacy"
< Content-Type: application/json
< Transfer-Encoding: chunked
< Date: Sat, 22 Feb 2014 09:39:32 GMT
< X-Varnish: 1162931632
< Age: 0
< Via: 1.1 varnish
< Cache-Control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0
< X-Player2: 1
< X-Varnish-Cache: 0
< nnCoection: close
< X-VServer: 10.90.128.193
<
* Connection #0 to host player.vimeo.com left intact
{"cdn_url":"http://a.vimeocdn.com","view":1,"request":{"files":{"h264":{"hd":{"profile":113,"origin":"ns3.pdl","url":"http://pdl.vimeocdn.com/72437/773/155150233.mp4?token2=1393065072_197f0ca458049c7217e9e8969c373af1&aksessionid=358994b3a75767bb","height":720,"width":1280,"id":155150233,"bitrate":2658,"availability":60},"sd":{"profile":112,"origin":"ns3.pdl","url":"http://pdl.vimeocdn.com/44925/440/155100150.mp4?token2=1393065072_cd5b62387758a46798e02dbd0b19bd3e&aksessionid=56c93283ac081129","height":360,"width":640,"id":155100150,"bitrate":860,"availability":60}},"hls":{"all":"http://av70.hls.vimeocdn.com/i/,44925/440/155100150,72437/773/155150233,.mp4.csmil/master.m3u8?primaryToken=1393065072_fe1a557fd7460bc8409bf09960614694","hd":"http://av70.hls.vimeocdn.com/i/,72437/773/155150233,.mp4.csmil/master.m3u8?primaryToken=1393065072_8ba190ee7643f318c75dc265a14b750d"},"codecs":["h264"]},"ga_account":"UA-76641-35","timestamp":1393061972,"expires":3100,"prefix":"/v2","session":"9d8f0ce5a2de113df027f1f1d2428648","cookie":{"scaling":1,"volume":1.0,"hd":null,"captions":null},"cookie_domain":".vimeo.com","referrer":null,"conviva_account":"c3.Vimeo","flags":{"login":1,"preload_video":1,"plays":1,"partials":1,"conviva":1},"build":{"player":"d854ba1a","js":"2.3.7"},"urls":{"zeroclip_swf":"http://a.vimeocdn.com/p/external/zeroclipboard/ZeroClipboard.swf","js":"http://a.vimeocdn.com/p/2.3.7/js/player.js","proxy":"https://secure-a.vimeocdn.com/p/2.3.7/proxy.html","conviva":"http://livepassdl.conviva.com/ver/2.72.0.13589/LivePass.js","flideo":"http://a.vimeocdn.com/p/flash/flideo/1.0.3b10/flideo.swf","canvas_js":"http://a.vimeocdn.com/p/2.3.7/js/player.canvas.js","moog":"http://a.vimeocdn.com/p/flash/moogaloop/6.0.7/moogaloop.swf?clip_id=63073570","conviva_service":"http://livepass.conviva.com","moog_js":"http://a.vimeocdn.com/p/2.3.7/js/moogaloop.js","zeroclip_js":"http://a.vimeocdn.com/p/external/zeroclipboard/ZeroClipboard-patch.js","css":"http://a.vimeocdn.com/p/2.3.7/css/player.css"},"signature":"67ef54c1e894448dd7c38e7da8a3bdba"},"player_url":"player.vimeo.com","video":{"allow_hd":1,"height":720,"owner":{"account_type":"basic","name":"Menna Fit\u00e9","img":"http://b.vimeocdn.com/ps/446/326/4463264_75.jpg","url":"http://vimeo.com/user10601457","img_2x":"http://b.vimeocdn.com/ps/446/326/4463264_300.jpg","id":10601457},"thumbs":{"1280":"http://b.vimeocdn.com/ts/433/562/433562952_1280.jpg","960":"http://b.vimeocdn.com/ts/433/562/433562952_960.jpg","640":"http://b.vimeocdn.com/ts/433/562/433562952_640.jpg"},"duration":2200,"id":63073570,"hd":1,"embed_code":"<iframe src=\"//player.vimeo.com/video/63073570\" width=\"500\" height=\"281\" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>","default_to_hd":1,"title":"No le digas a la Mama que me he ido a Mongolia en Moto","url":"http://vimeo.com/63073570","privacy":"anybody","share_url":"http://vimeo.com/63073570","width":1280,"embed_permission":"public","fps":25.0},"build":{"player":"d854ba1a","rpc":"dev"},"embed":{"player_id":null,"outro":"nothing","api":2,"context":"clip.main","time":0,"color":"00adef","settings":{"fullscreen":1,"instant_sidedock":1,"byline":0,"like":1,"playbar":1,"title":0,"color":1,"branding":0,"share":1,"scaling":1,"logo":0,"info_on_pause":0,"watch_later":1,"portrait":0,"embed":1,"badge":0,"volume":1},"on_site":1,"loop":0,"autoplay":0},"vimeo_url":"vimeo.com","user":{"liked":0,"account_type":"none","logged_in":0,"owner":0,"watch_later":0,"id":0,"mod":0}}* Closing connection #0
'''
video_urls = []
media_url = json_object['request']['files']['h264']['hd']['url']
video_urls.append( [ "HD [vimeo]",media_url ] )
media_url = json_object['request']['files']['h264']['sd']['url']
video_urls.append( [ "SD [vimeo]",media_url ] )
# Hand the HD stream to the player, as the other resolvers in this file do
plugintools.play_resolved_url(video_urls[0][1])
def veetle(params):
plugintools.log('[%s %s] Veetle %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
# Fetch the page source
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(url, headers=headers)
data = r.text
# Get the Veetle channel ID
if url.startswith("http://veetle.com/index.php/channel/view") == True: # http://veetle.com/index.php/channel/view#520c3ec32200c (la URL incluye el ID de Veetle)
id_veetle=plugintools.find_single_match(url, 'view#([^/]+)')
plugintools.log("id_veetle= "+id_veetle)
elif url.startswith("http://veetle.com/?play=") == True: # http://veetle.com/?play=7a1c4b6130984cc3bf239cafeff7d04e (hay que buscar ID del canal de Veetle)
live_id=url.split("play=")[1]
plugintools.log("live_id= "+live_id)
id_veetle=plugintools.find_single_match(data, live_id+'/(.*?)_'+live_id)
plugintools.log("id_veetle= "+id_veetle)
# Look for video links...
url_veetle='http://veetle.com/index.php/stream/ajaxStreamLocation/'+id_veetle+'/android-hls'
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(url_veetle, headers=headers);data = r.text
url_veetle = plugintools.find_single_match(data, 'payload"."([^"]+)').replace("\\", "")
if url_veetle == "":
url_veetle='http://veetle.com/index.php/stream/ajaxStreamLocation/'+id_veetle+'/flash'
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(url_veetle, headers=headers);data = r.text
url_veetle = plugintools.find_single_match(data, 'payload"."([^"]+)').replace("\\", "")
plugintools.play_resolved_url(url_veetle)
def videoweed(params):
plugintools.log('[%s %s] Videoweed %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
headers = { "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14", "Accept-Encoding": "gzip,deflate,sdch" }
#data = scrapertools.cache_page(page_url,headers=headers)
r = requests.get(url, headers=headers)
data = r.text
print data
file_parameter = plugintools.find_single_match(data,'flashvars\.file="([^"]+)"')
plugintools.log("file_parameter="+file_parameter)
filekey_parameter = plugintools.find_single_match(data,'flashvars.filekey\="([^"]+)"')
plugintools.log("filekey_parameter="+filekey_parameter)
if filekey_parameter=="":
filekey_parameter = plugintools.find_single_match(data,'fkz="([^"]+)"')
plugintools.log("filekey_parameter="+filekey_parameter)
#88%2E0%2E189%2E203%2Dd3cb0515a1ed66e5b297da999ed23b42%2D
filekey_parameter = filekey_parameter.replace(".","%2E")
filekey_parameter = filekey_parameter.replace("-","%2D")
plugintools.log("filekey_parameter="+filekey_parameter)
# http://www.videoweed.es/api/player.api.php?cid=undefined&cid2=undefined&file=31f8c26a80d23&cid3=undefined&key=88%2E0%2E189%2E203%2Dd3cb0515a1ed66e5b297da999ed23b42%2D&numOfErrors=0&user=undefined&pass=undefined
parameters="cid=undefined&cid2=undefined&file="+file_parameter+"&cid3=undefined&key="+filekey_parameter+"&numOfErrors=0&user=undefined&pass=undefined"
url = "http://www.videoweed.es/api/player.api.php?"+parameters
headers["Referer"]=url
#data = scrapertools.cache_page(url,headers=headers)
r = requests.get(url, headers=headers);data = r.text
plugintools.log(data)
patron = 'url=(.*?)&title='
url_media = re.compile(patron).findall(data)[0]
plugintools.play_resolved_url(url_media)
def streamable(params):
plugintools.log('[%s %s] Streamable %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
headers = { "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14", "Accept-Encoding": "gzip,deflate,sdch" }
data = scrapertools.cache_page(url)
data = plugintools.find_single_match(data,'<embed(.*?)</embed>')
data = plugintools.find_single_match(data,'setting=(.*?)"')
import base64
info_url= base64.b64decode(data)
data = scrapertools.cache_page(info_url)
vcode = plugintools.find_single_match(data,'"vcode":"(.*?)",')
st = plugintools.find_single_match(data,'"st":(.*?),')
media_url = "http://video.streamable.ch/s?v="+vcode+"&t="+st
media_url=media_url.strip()
plugintools.play_resolved_url(media_url)
def rocvideo(params):
plugintools.log('[%s %s] Rocvideo %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if not "embed" in page_url:
page_url = page_url.replace("http://rocvideo.tv/","http://rocvideo.tv/embed-") + ".html"
headers = { "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14" }
r=requests.get(page_url, headers=headers);data=r.text
data = scrapertools.find_single_match(data,"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = unpackerjs.unpackjs(data)
plugintools.log("data="+data)
media_url = scrapertools.get_match(data,'file:"([^"]+)"').strip()
plugintools.play_resolved_url(media_url)
def realvid(params):
plugintools.log('[%s %s] Realvid %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if not "embed" in page_url:
page_url = page_url.replace("http://realvid.net/","http://realvid.net/embed-") + ".html"
headers = { "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14" }
r=requests.get(page_url, headers=headers);data=r.text
media_url = plugintools.find_single_match(data,'file: "([^"]+)",').strip()
plugintools.play_resolved_url(media_url)
def netu(params):
plugintools.log('[%s %s] Netu %s' % (addonName, addonVersion, repr(params)))
page_url= params.get("url")
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0' }
if "www.yaske.net" or "www.yaske.cc" in page_url:
## Encode a la url para pasarla como valor de parámetro
urlEncode = urllib.quote_plus(page_url)
## Carga los datos
id_video = plugintools.find_single_match( page_url , "embed_([A-Za-z0-9]+)")
#data = scrapertools.cache_page( page_url , headers=headers )
r = requests.get(page_url, headers=headers);data = r.text
headers["Referer"]=page_url
try:
## Nueva id del video
page_url_the_new_video_id = plugintools.find_single_match( data , 'script src="([^"]+)"></script>')
#data_with_new_video_id = scrapertools.cache_page( page_url_the_new_video_id , headers=headers )
r = requests.get(page_url_the_new_video_id, headers=headers)
data_with_new_video_id = r.text
## Some links need this extra step (the next line) to obtain the id
data_with_new_video_id = urllib.unquote( data_with_new_video_id )
new_id_video = plugintools.find_single_match( data_with_new_video_id , "var vid='([^']+)';")
## Petición a hqq.tv con la nueva id de vídeo
b64_data = get_b64_data(new_id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
except:
## Recoge los datos
b64_data = plugintools.find_single_match( data , '<script.*?src="data:text\/javascript;charset=utf-8;base64,([^"]+)"><\/script>')
## Doble decode y escape
utf8 = double_b64(b64_data)
## Nueva id del video
new_id_video = plugintools.find_single_match( utf8 , 'value="([^"]+)"')
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(new_id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
### at ###
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### m3u8 ###
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( new_id_video, at, urlEncode, headers )
### tb_m3u8 ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2)
elif "pruebas.wz" in page_url:
## Encode a la url para pasarla como valor de parámetro con hqq como host
urlEncode = urllib.quote_plus( page_url.replace("netu","hqq") )
### at ###
print page_url
id_video = page_url.split("=")[1]
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### b_m3u8 ###
headers["Referer"]=page_url
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( id_video, at, urlEncode, headers )
### tb ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2).strip()
else:
## Encode a la url para pasarla como valor de parámetro con hqq como host
urlEncode = urllib.quote_plus( page_url.replace("netu","hqq") )
### at ###
print page_url
id_video = page_url.split("=")[1]
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### b_m3u8 ###
headers["Referer"]=page_url
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( id_video, at, urlEncode, headers )
### tb ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2).strip()
### m3u8 ###
media_url = url_m3u8
print media_url
plugintools.play_resolved_url(media_url)
def waaw(params):
plugintools.log('[%s %s] Waaw.tv (ex Netu.tv) %s' % (addonName, addonVersion, repr(params)))
page_url= params.get("url")
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0' }
if "www.yaske.net" or "www.yaske.cc" in page_url:
## Encode a la url para pasarla como valor de parámetro
urlEncode = urllib.quote_plus(page_url)
## Carga los datos
id_video = plugintools.find_single_match( page_url , "embed_([A-Za-z0-9]+)")
#data = scrapertools.cache_page( page_url , headers=headers )
r = requests.get(page_url, headers=headers);data = r.text
headers["Referer"]=page_url
try:
## Nueva id del video
page_url_the_new_video_id = plugintools.find_single_match( data , 'script src="([^"]+)"></script>')
#data_with_new_video_id = scrapertools.cache_page( page_url_the_new_video_id , headers=headers )
r = requests.get(page_url_the_new_video_id, headers=headers)
data_with_new_video_id = r.text
## Some links need this extra step (the next line) to obtain the id
data_with_new_video_id = urllib.unquote( data_with_new_video_id )
new_id_video = plugintools.find_single_match( data_with_new_video_id , "var vid='([^']+)';")
## Petición a hqq.tv con la nueva id de vídeo
b64_data = get_b64_data(new_id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
except:
## Recoge los datos
b64_data = plugintools.find_single_match( data , '<script.*?src="data:text\/javascript;charset=utf-8;base64,([^"]+)"><\/script>')
## Doble decode y escape
utf8 = double_b64(b64_data)
## Nueva id del video
new_id_video = plugintools.find_single_match( utf8 , 'value="([^"]+)"')
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(new_id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
### at ###
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### m3u8 ###
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( new_id_video, at, urlEncode, headers )
### tb_m3u8 ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2)
elif "pruebas.wz" in page_url:
## Encode a la url para pasarla como valor de parámetro con hqq como host
urlEncode = urllib.quote_plus( page_url.replace("netu","hqq") )
### at ###
print page_url
id_video = page_url.split("=")[1]
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### b_m3u8 ###
headers["Referer"]=page_url
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( id_video, at, urlEncode, headers )
### tb ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2).strip()
else:
## Encode a la url para pasarla como valor de parámetro con hqq como host
urlEncode = urllib.quote_plus( page_url.replace("netu","hqq") )
### at ###
print page_url
id_video = page_url.split("=")[1]
## Petición a hqq.tv con la nueva id de vídeo y recoger los datos
b64_data = get_b64_data(id_video, headers)
## Doble decode y escape
utf8 = double_b64(b64_data)
match_at = '<input name="at" id="text" value="([^"]+)">'
at = plugintools.find_single_match(utf8, match_at)
### b_m3u8 ###
headers["Referer"]=page_url
## Recoger los bytes ofuscados que contiene la url del m3u8
b_m3u8_2 = get_obfuscated( id_video, at, urlEncode, headers )
### tb ###
## Obtener la url del m3u8
url_m3u8 = tb(b_m3u8_2).strip()
### m3u8 ###
media_url = url_m3u8
print media_url
plugintools.play_resolved_url(media_url)
## --------------------------------------------------------------------------------
## Base64 decoding helpers for Netu
## --------------------------------------------------------------------------------
## Decode
def b64(text, inverse=False):
if inverse:
text = text[::-1]
return base64.decodestring(text)
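# Note: inverse=True reverses the string before decoding; double_b64() below
# relies on it because the second-stage payload is stored reversed.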
## Request hqq.tv with the new video id
def get_b64_data(new_id_video, headers):
page_url_hqq = "http://hqq.tv/player/embed_player.php?vid="+new_id_video+"&autoplay=no"
data_page_url_hqq = scrapertools.cache_page( page_url_hqq , headers=headers )
b64_data = scrapertools.get_match(data_page_url_hqq, 'base64,([^"]+)"')
return b64_data
## Double base64 decode plus unicode-escape
def double_b64(b64_data):
b64_data_inverse = b64(b64_data)
b64_data_2 = scrapertools.get_match(b64_data_inverse, "='([^']+)';")
utf8_data_encode = b64(b64_data_2,True)
utf8_encode = scrapertools.get_match(utf8_data_encode, "='([^']+)';")
utf8_decode = utf8_encode.replace("%","\\").decode('unicode-escape')
return utf8_decode
## Collect the obfuscated bytes that contain the m3u8 url
def get_obfuscated(id_video, at, urlEncode, headers):
url = "http://hqq.tv/sec/player/embed_player.php?vid="+id_video+"&at="+at+"&autoplayed=yes&referer=on&http_referer="+urlEncode+"&pass="
data = scrapertools.cache_page( url, headers=headers )
match_b_m3u8_1 = '</div>.*?<script>document.write[^"]+"([^"]+)"'
b_m3u8_1 = urllib.unquote( scrapertools.get_match(data, match_b_m3u8_1) )
if b_m3u8_1 == "undefined": b_m3u8_1 = urllib.unquote( data )
match_b_m3u8_2 = '"#([^"]+)"'
b_m3u8_2 = scrapertools.get_match(b_m3u8_1, match_b_m3u8_2)
return b_m3u8_2
## Get the m3u8 url
def tb(b_m3u8_2):
j = 0
s2 = ""
while j < len(b_m3u8_2):
s2+= "\\u0"+b_m3u8_2[j:(j+3)]
j+= 3
return s2.decode('unicode-escape').encode('ASCII', 'ignore')
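# --- Added illustrative sketch (not part of the original addon) ---------------
# tb() expects groups of three hex digits, each the low bits of a code point;
# it prefixes "\u0" to every group and unescapes the result. This is the last
# step of the get_b64_data -> double_b64 -> get_obfuscated -> tb pipeline.
def _tb_example():
    return tb("068074074070")  # 0x68 0x74 0x74 0x70 -> "http"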
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def videomega(params):
plugintools.log('[%s %s] Videomega.tv %s' % (addonName, addonVersion, repr(params)))
#data = scrapertools.downloadpage(page_url,follow_redirects=False)
page_url = params.get("url")
ref = page_url.split("ref=")[1]
page_url = 'http://videomega.tv/view.php?ref='+ref+'&width=100%&height=400'
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0', 'Referer': page_url, "Accept-Encoding": "gzip, deflate, sdch" }
#data = scrapertools.downloadpage(page_url,follow_redirects=False)
r = requests.get(page_url, headers=headers);data = r.text
media_url = plugintools.find_single_match(data,'<source src="([^"]+)"')
plugintools.play_resolved_url(media_url)
def videott(params):
plugintools.log("[%s %s] Videott %s " % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
# Video URL
videoid = scrapertools.find_single_match(page_url,"video.tt/video/([A-Za-z0-9]+)")
plugintools.log("videoid= "+videoid)
timestamp=str(random.randint(1000000000,9999999999))
hexastring = scrapertools.get_sha1(page_url) + scrapertools.get_sha1(page_url) + scrapertools.get_sha1(page_url) + scrapertools.get_sha1(page_url)
hexastring = hexastring[:96]
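# Note: the 'c' request token above is simply the page-url SHA-1 repeated and
# truncated to 96 hex characters; 't' is a random 10-digit timestamp.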
media_url = "http://gs.video.tt/s?v="+videoid+"&r=1&t="+timestamp+"&u=&c="+hexastring+"&start=0"
plugintools.play_resolved_url(media_url)
def flashx(params):
plugintools.log("[%s %s] Flashx %s " % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
data = scrapertools.cache_page( page_url , headers=headers )
#plugintools.log("data= "+data)
form_url = plugintools.find_single_match(data,"<Form method=\"POST\" action='([^']+)'>")
form_url = urlparse.urljoin(page_url,form_url)
op = plugintools.find_single_match(data,'<input type="hidden" name="op" value="([^"]+)"')
usr_login = ""
id = plugintools.find_single_match(data,'<input type="hidden" name="id" value="([^"]+)"')
fname = plugintools.find_single_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
referer = plugintools.find_single_match(data,'<input type="hidden" name="referer" value="([^"]*)"')
hashstring = plugintools.find_single_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
imhuman = plugintools.find_single_match(data,'<input type="submit".*?name="imhuman" value="([^"]+)"').replace(" ","+")
time.sleep(10)
post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
headers.append(["Referer",page_url])
body = scrapertools.cache_page( form_url , post=post, headers=headers )
#plugintools.log("body="+body)
data = plugintools.find_single_match(body,"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = unpackerjs.unpackjs(data)
#plugintools.log("data="+data)
urls = plugintools.find_multiple_matches(data,'file:"([^"]+)')
print urls
for entry in urls:
if entry.endswith(".mp4") == True:
media_url = entry
plugintools.play_resolved_url(media_url)
def okru(params):
plugintools.log("[%s %s] Ok.ru %s " % (addonName, addonVersion, repr(params)))
url=params.get("url")
plugintools.log("url= "+url)
hash_url=url.replace("http://ok.ru/video/", "").strip()
plugintools.log("hash= "+hash_url)
url_json='http://ok.ru/dk?cmd=videoPlayerMetadata&mid='+hash_url
plugintools.log("url_json= "+url_json)
r=requests.get(url_json)
data=r.content
js=json.loads(data)
videos=js["videos"]
opts={}
for video in videos:
opts[video["name"]]=video["url"]
if opts["hd"]:
plugintools.play_resolved_url(opts["hd"])
elif opts["sd"]:
plugintools.play_resolved_url(opts["sd"])
elif opts["mobile"]:
plugintools.play_resolved_url(opts["mobile"])
elif opts["lowest"]:
plugintools.play_resolved_url(opts["lowest"])
def vidtome(params):
plugintools.log("[%s %s] Vidto.me %s " % (addonName, addonVersion, repr(params)))
url=params.get("url")
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidto.me/embed-%s.html' % url
plugintools.log("url= "+url)
r=requests.get(url)
data=r.content
result = re.compile('(eval.*?\)\)\))').findall(data)[-1]
result = unpackerjs.unpackjs(result)
#plugintools.log("result= "+result)
quality=plugintools.find_multiple_matches(result, 'label:"([^"]+)')
url_media=plugintools.find_multiple_matches(result, 'file:"([^"]+)')
plugintools.play_resolved_url(url_media[len(quality)-1])
def playwire(params):
plugintools.log("[%s %s] Playwire en Ourmatch.net %s " % (addonName, addonVersion, repr(params)))
url=params.get("url")
r=requests.get(url)
data=r.content
video_contents=plugintools.find_single_match(data, 'var video_contents = {(.*?)</script>')
items_video=plugintools.find_multiple_matches(video_contents, '{(.*?)}')
for entry in items_video:
url_zeus=plugintools.find_single_match(entry, 'config.playwire.com/(.*?)"')
zeus='http://config.playwire.com/'+url_zeus
type_item=plugintools.find_single_match(entry, "type\':\'([^']+)")
lang=plugintools.find_single_match(entry, "lang:\'([^']+)")
title_item='[COLOR white]'+type_item+' [/COLOR][I][COLOR lightyellow]'+lang+'[/I][/COLOR]'
print zeus,title_item
url_media=[];posters=[]
r=requests.get(zeus)
data=r.content
url_f4m=plugintools.find_single_match(data, 'f4m\":\"(.*?)f4m');url_f4m=url_f4m+'f4m'
poster=plugintools.find_single_match(data, 'poster\":\"(.*?)png');poster=poster+'png'
posters.append(poster)
url_media.append(url_f4m)
url_videos=dict.fromkeys(url_media).keys()
url_poster=dict.fromkeys(posters).keys()
r=requests.get(url_videos[0])
data=r.content
print data
burl=plugintools.find_single_match(data, '<baseURL>([^<]+)</baseURL>')
media_item=plugintools.find_multiple_matches(data, '<media(.*?)"/>')
i=1
while i<=len(media_item):
for item in media_item:
plugintools.log("item= "+item)
media=plugintools.find_single_match(item, 'url="([^"]+)')
bitrate=plugintools.find_single_match(item, 'bitrate="([^"]+)')
url_media=burl+'/'+media
title_fixed=title_item+' [COLOR lightblue][I]('+bitrate+' kbps)[/I][/COLOR]'
plugintools.add_item(action="play", title=title_fixed, url=url_media, thumbnail=url_poster[0], fanart='http://images.huffingtonpost.com/2014-09-12-image1.JPG', folder=False, isPlayable=True)
i=i+1
#http://config.playwire.com/17003/videos/v2/4225978/zeus.json
#https://config.playwire.com/17003/videos/v2/4225978/manifest.f4m
#https://cdn.phoenix.intergi.com/17003/videos/4225978/video-sd.mp4?hosting_id=17003
| gpl-2.0 |
vitaly4uk/django | tests/defer_regress/tests.py | 269 | 12479 | from __future__ import unicode_literals
from operator import attrgetter
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.test import TestCase, override_settings
from .models import (
Base, Child, Derived, Feature, Item, ItemAndSimpleItem, Leaf, Location,
OneToOneItem, Proxy, ProxyRelated, RelatedItem, Request, ResolveThis,
SimpleItem, SpecialFeature,
)
class DeferRegressionTest(TestCase):
def test_basic(self):
# Deferred fields should really be deferred and not accidentally use
# the field's default value just because they aren't passed to __init__
Item.objects.create(name="first", value=42)
obj = Item.objects.only("name", "other_value").get(name="first")
# Accessing "name" doesn't trigger a new database query. Accessing
# "value" or "text" should.
with self.assertNumQueries(0):
self.assertEqual(obj.name, "first")
self.assertEqual(obj.other_value, 0)
with self.assertNumQueries(1):
self.assertEqual(obj.value, 42)
with self.assertNumQueries(1):
self.assertEqual(obj.text, "xyzzy")
with self.assertNumQueries(0):
self.assertEqual(obj.text, "xyzzy")
# Regression test for #10695. Make sure different instances don't
# inadvertently share data in the deferred descriptor objects.
i = Item.objects.create(name="no I'm first", value=37)
items = Item.objects.only("value").order_by("-value")
self.assertEqual(items[0].name, "first")
self.assertEqual(items[1].name, "no I'm first")
RelatedItem.objects.create(item=i)
r = RelatedItem.objects.defer("item").get()
self.assertEqual(r.item_id, i.id)
self.assertEqual(r.item, i)
# Some further checks for select_related() and inherited model
# behavior (regression for #10710).
c1 = Child.objects.create(name="c1", value=42)
c2 = Child.objects.create(name="c2", value=37)
Leaf.objects.create(name="l1", child=c1, second_child=c2)
obj = Leaf.objects.only("name", "child").select_related()[0]
self.assertEqual(obj.child.name, "c1")
self.assertQuerysetEqual(
Leaf.objects.select_related().only("child__name", "second_child__name"), [
"l1",
],
attrgetter("name")
)
# Models instances with deferred fields should still return the same
# content types as their non-deferred versions (bug #10738).
ctype = ContentType.objects.get_for_model
c1 = ctype(Item.objects.all()[0])
c2 = ctype(Item.objects.defer("name")[0])
c3 = ctype(Item.objects.only("name")[0])
self.assertTrue(c1 is c2 is c3)
# Regression for #10733 - only() can be used on a model with two
# foreign keys.
results = Leaf.objects.only("name", "child", "second_child").select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
results = Leaf.objects.only(
"name", "child", "second_child", "child__name", "second_child__name"
).select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_11936(self):
app_config = apps.get_app_config("defer_regress")
# Regression for #11936 - get_models should not return deferred models
# by default. Run a couple of defer queries so that app registry must
# contain some deferred classes. It might contain a lot more classes
# depending on the order the tests are ran.
list(Item.objects.defer("name"))
list(Child.objects.defer("value"))
klasses = {model.__name__ for model in app_config.get_models()}
self.assertIn("Child", klasses)
self.assertIn("Item", klasses)
self.assertNotIn("Child_Deferred_value", klasses)
self.assertNotIn("Item_Deferred_name", klasses)
self.assertFalse(any(k._deferred for k in app_config.get_models()))
klasses_with_deferred = {model.__name__ for model in app_config.get_models(include_deferred=True)}
self.assertIn("Child", klasses_with_deferred)
self.assertIn("Item", klasses_with_deferred)
self.assertIn("Child_Deferred_value", klasses_with_deferred)
self.assertIn("Item_Deferred_name", klasses_with_deferred)
self.assertTrue(any(k._deferred for k in app_config.get_models(include_deferred=True)))
@override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer')
def test_ticket_12163(self):
# Test for #12163 - Pickling error saving session with unsaved model
# instances.
SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'
item = Item()
item._deferred = False
s = SessionStore(SESSION_KEY)
s.clear()
s["item"] = item
s.save()
s = SessionStore(SESSION_KEY)
s.modified = True
s.save()
i2 = s["item"]
self.assertFalse(i2._deferred)
def test_ticket_16409(self):
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_23270(self):
Derived.objects.create(text="foo", other_text="bar")
with self.assertNumQueries(1):
obj = Base.objects.select_related("derived").defer("text")[0]
self.assertIsInstance(obj.derived, Derived)
self.assertEqual("bar", obj.derived.other_text)
self.assertNotIn("text", obj.__dict__)
self.assertEqual(1, obj.derived.base_ptr_id)
def test_only_and_defer_usage_on_proxy_models(self):
# Regression for #15790 - only() broken for proxy models
proxy = Proxy.objects.create(name="proxy", value=42)
msg = 'QuerySet.only() return bogus results with proxy models'
dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
# also test things with .defer()
msg = 'QuerySet.defer() return bogus results with proxy models'
dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
def test_resolve_columns(self):
ResolveThis.objects.create(num=5.0, name='Foobar')
qs = ResolveThis.objects.defer('num')
self.assertEqual(1, qs.count())
self.assertEqual('Foobar', qs[0].name)
def test_reverse_one_to_one_relations(self):
# Refs #14694. Test reverse relations which are known unique (reverse
# side has o2ofield or unique FK) - the o2o case
item = Item.objects.create(name="first", value=42)
o2o = OneToOneItem.objects.create(item=item, name="second")
self.assertEqual(len(Item.objects.defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item')), 1)
self.assertEqual(len(Item.objects.select_related(
'one_to_one_item').defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item').defer('value')), 1)
# Make sure that `only()` doesn't break when we pass in a unique relation,
# rather than a field on the relation.
self.assertEqual(len(Item.objects.only('one_to_one_item')), 1)
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item').defer(
'value', 'one_to_one_item__name')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.name, "first")
with self.assertNumQueries(1):
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
self.assertEqual(i.value, 42)
def test_defer_with_select_related(self):
item1 = Item.objects.create(name="first", value=47)
item2 = Item.objects.create(name="second", value=42)
simple = SimpleItem.objects.create(name="simple", value="23")
ItemAndSimpleItem.objects.create(item=item1, simple=simple)
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item1)
self.assertEqual(obj.item_id, item1.id)
obj.item = item2
obj.save()
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item2)
self.assertEqual(obj.item_id, item2.id)
def test_proxy_model_defer_with_select_related(self):
# Regression for #22050
item = Item.objects.create(name="first", value=47)
RelatedItem.objects.create(item=item)
# Defer fields with only()
obj = ProxyRelated.objects.all().select_related().only('item__name')[0]
with self.assertNumQueries(0):
self.assertEqual(obj.item.name, "first")
with self.assertNumQueries(1):
self.assertEqual(obj.item.value, 47)
def test_only_with_select_related(self):
# Test for #17485.
item = SimpleItem.objects.create(name='first', value=47)
feature = Feature.objects.create(item=item)
SpecialFeature.objects.create(feature=feature)
qs = Feature.objects.only('item__name').select_related('item')
self.assertEqual(len(qs), 1)
qs = SpecialFeature.objects.only('feature__item__name').select_related('feature__item')
self.assertEqual(len(qs), 1)
def test_deferred_class_factory(self):
new_class = deferred_class_factory(
Item,
('this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered',))
self.assertEqual(
new_class.__name__,
'Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e')
def test_deferred_class_factory_already_deferred(self):
deferred_item1 = deferred_class_factory(Item, ('name',))
deferred_item2 = deferred_class_factory(deferred_item1, ('value',))
self.assertIs(deferred_item2._meta.proxy_for_model, Item)
self.assertNotIsInstance(deferred_item2.__dict__.get('name'), DeferredAttribute)
self.assertIsInstance(deferred_item2.__dict__.get('value'), DeferredAttribute)
def test_deferred_class_factory_no_attrs(self):
deferred_cls = deferred_class_factory(Item, ())
self.assertFalse(deferred_cls._deferred)
class DeferAnnotateSelectRelatedTest(TestCase):
def test_defer_annotate_select_related(self):
location = Location.objects.create()
Request.objects.create(location=location)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile', 'location')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile__profile1', 'location__location1')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.defer('request1', 'request2', 'request3', 'request4')), list)
| bsd-3-clause |
LinusU/ansible | lib/ansible/executor/task_queue_manager.py | 52 | 9846 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import socket
import sys
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._callbacks_loaded = False
self._callback_plugins = []
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
        # create the pool of worker processes, based on the number of forks specified
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
self._workers = []
for i in range(self._options.forks):
main_q = multiprocessing.Queue()
rslt_q = multiprocessing.Queue()
prc = WorkerProcess(self, main_q, rslt_q, loader)
prc.start()
self._workers.append((prc, main_q, rslt_q))
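        # The result process drains each worker's result queue and funnels
        # completed task results into the shared final queue (self._final_q).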
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
# FIXME: there is a block compile helper for this...
handler_list = []
for handler_block in handlers:
for handler in handler_block.block:
handler_list.append(handler)
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
continue
self._callback_plugins.append(callback_plugin(self._display))
else:
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
)
# and run the play using the strategy
return strategy.run(iterator, play_context)
def cleanup(self):
self._display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._result_prc.terminate()
for (worker_prc, main_q, rslt_q) in self._workers:
rslt_q.close()
main_q.close()
worker_prc.terminate()
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
methods = [
getattr(callback_plugin, method_name, None),
getattr(callback_plugin, 'v2_on_any', None)
]
for method in methods:
if method is not None:
try:
method(*args, **kwargs)
except Exception as e:
self._display.warning('Error when using %s: %s' % (method, str(e)))
| gpl-3.0 |
forseti-security/forseti-security | tests/services/api_tests/inventory_test.py | 1 | 6154 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Forseti Server inventory service."""
import time
import unittest
import unittest.mock
from tests.services.api_tests.api_tester import ApiTestRunner
from tests.services.inventory import gcp_api_mocks
from tests.services.util.db import create_test_engine
from tests.services.util.mock import MockServerConfig
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.util.threadpool import ThreadPool
from google.cloud.forseti.services import db
from google.cloud.forseti.services.base.config import InventoryConfig
from google.cloud.forseti.services.client import ClientComposition
from google.cloud.forseti.services.dao import ModelManager
from google.cloud.forseti.services.explain.service import GrpcExplainerFactory
from google.cloud.forseti.services.inventory.service import GrpcInventoryFactory
from google.cloud.forseti.services.inventory.storage import Storage
class TestServiceConfig(MockServerConfig):
"""ServiceConfig Stub."""
def __init__(self):
self.engine = create_test_engine()
self.model_manager = ModelManager(self.engine)
self.sessionmaker = db.create_scoped_sessionmaker(self.engine)
self.workers = ThreadPool(10)
self.inventory_config = InventoryConfig(gcp_api_mocks.ORGANIZATION_ID,
'',
{},
'',
{})
self.inventory_config.set_service_config(self)
def run_in_background(self, func):
"""Stub."""
self.workers.add_func(func)
def get_engine(self):
return self.engine
def scoped_session(self):
return self.sessionmaker()
def client(self):
return ClientComposition(self.endpoint)
def get_storage_class(self):
return Storage
def get_inventory_config(self):
return self.inventory_config
def create_tester():
"""Create API test runner."""
return ApiTestRunner(
TestServiceConfig(),
[GrpcExplainerFactory,
GrpcInventoryFactory])
class ApiTest(ForsetiTestCase):
"""Api Test."""
def test_basic(self):
"""Test: Create inventory, foreground & no import."""
def test(client):
"""API test callback."""
progress = None
inventory_index = None
for progress in client.inventory.create(background=False,
import_as=''):
continue
self.assertGreater(len([x for x in client.inventory.list()]),
0,
'Assert list not empty')
for inventory_index in client.inventory.list():
self.assertTrue(inventory_index.id == progress.id)
self.assertEqual(inventory_index.id,
(client.inventory.get(inventory_index.id)
.inventory.id))
with gcp_api_mocks.mock_gcp():
setup = create_tester()
setup.run(test)
def test_basic_background(self):
"""Test: Create inventory, background & no import."""
def test(client):
"""API test callback."""
progress = None
inventory_index = None
for progress in client.inventory.create(background=True,
import_as=''):
continue
while True:
# Give background running time to complete.
time.sleep(5)
if [x for x in client.inventory.list()]:
break
self.assertGreater(len([x for x in client.inventory.list()]),
0,
'Assert list not empty')
for inventory_index in client.inventory.list():
self.assertTrue(inventory_index.id == progress.id)
self.assertEqual(inventory_index.id,
(client.inventory.get(inventory_index.id)
.inventory.id))
self.assertEqual(inventory_index.id,
(client.inventory.delete(inventory_index.id)
.inventory.id))
self.assertEqual([], [i for i in client.inventory.list()])
with gcp_api_mocks.mock_gcp():
setup = create_tester()
setup.run(test)
def test_error(self):
"""Test: Create inventory, foreground, exception raised."""
def test(client):
"""API test callback."""
progress = None
inventory_index = None
for progress in client.inventory.create(background=False,
import_as=''):
continue
for inventory_index in client.inventory.list():
self.assertTrue(inventory_index.id == progress.id)
result = client.inventory.get(inventory_index.id).inventory
# Ensure inventory failure.
self.assertEqual('FAILURE', result.status)
self.assertIn('Boom!', result.errors)
with unittest.mock.patch.object(Storage, 'write') as mock_write:
mock_write.side_effect = Exception('Boom!')
with gcp_api_mocks.mock_gcp():
setup = create_tester()
setup.run(test)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
cga-harvard/worldmap | wm_extra/accounts/views.py | 1 | 11290 | from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import redirect, get_object_or_404
from django.utils.http import base36_to_int, int_to_base36
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.edit import FormView, CreateView
from django.core.urlresolvers import reverse
from django.db import transaction
from forms import SignupForm
from django.views.generic.edit import FormView
from django.contrib import auth, messages
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.contrib.auth import get_user_model
from account.models import SignupCode, EmailAddress, \
EmailConfirmation, Account, AccountDeletion
from account import signals
from account.utils import default_redirect
from account.hooks import hookset  # provides get_user_credentials(); assumed from django-user-accounts
from django.contrib.sites.models import get_current_site  # used by send_email_confirmation()
from pinax.notifications import models as notification
class SignupView(FormView):
template_name = "account/signup.html"
template_name_ajax = "account/ajax/signup.html"
template_name_email_confirmation_sent = "account/email_confirmation_sent.html"
template_name_email_confirmation_sent_ajax = "account/ajax/email_confirmation_sent.html"
template_name_admin_approval_sent = "account/admin_approval_sent.html"
template_name_admin_approval_sent_ajax = "account/ajax/admin_approval_sent.html"
template_name_signup_closed = "account/signup_closed.html"
template_name_signup_closed_ajax = "account/ajax/signup_closed.html"
form_class = SignupForm
form_kwargs = {}
redirect_field_name = "next"
identifier_field = "username"
messages = {
"email_confirmation_sent": {
"level": messages.INFO,
"text": _("Confirmation email sent to {email}.")
},
"invalid_signup_code": {
"level": messages.WARNING,
"text": _("The code {code} is invalid.")
}
}
def __init__(self, *args, **kwargs):
self.created_user = None
kwargs["signup_code"] = None
super(SignupView, self).__init__(*args, **kwargs)
def get(self, *args, **kwargs):
if self.request.user.is_authenticated():
return redirect(default_redirect(self.request, settings.ACCOUNT_LOGIN_REDIRECT_URL))
if not self.is_open():
return self.closed()
return super(SignupView, self).get(*args, **kwargs)
def post(self, *args, **kwargs):
if not self.is_open():
return self.closed()
return super(SignupView, self).post(*args, **kwargs)
def get_initial(self):
initial = super(SignupView, self).get_initial()
if self.signup_code:
initial["code"] = self.signup_code.code
if self.signup_code.email:
initial["email"] = self.signup_code.email
return initial
def get_template_names(self):
if self.request.is_ajax():
return [self.template_name_ajax]
else:
return [self.template_name]
def get_context_data(self, **kwargs):
ctx = kwargs
redirect_field_name = self.get_redirect_field_name()
ctx.update({
"redirect_field_name": redirect_field_name,
"redirect_field_value": self.request.REQUEST.get(redirect_field_name, ""),
})
return ctx
def get_form_kwargs(self):
kwargs = super(SignupView, self).get_form_kwargs()
kwargs.update(self.form_kwargs)
return kwargs
def form_invalid(self, form):
signals.user_sign_up_attempt.send(
sender=SignupForm,
username=form.data.get("username"),
email=form.data.get("email"),
result=form.is_valid()
)
return super(SignupView, self).form_invalid(form)
@transaction.atomic
def form_valid(self, form):
self.created_user = self.create_user(form, commit=False)
        # prevent User post_save signal from creating an Account instance;
        # we want to handle that ourselves.
self.created_user._disable_account_creation = True
self.created_user.save()
email_address = self.create_email_address(form)
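        # WorldMap-specific: users who identify themselves as organisation
        # members are added to the pre-existing 'Harvard' auth group.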
if form.cleaned_data['is_org_member']:
harvard_group=Group.objects.get(name='Harvard')
self.created_user.groups.add(harvard_group)
if settings.ACCOUNT_EMAIL_CONFIRMATION_REQUIRED and not email_address.verified:
self.created_user.is_active = False
self.created_user.save()
if settings.ACCOUNT_APPROVAL_REQUIRED:
self.created_user.is_active = False
self.created_user.save()
self.create_account(form)
self.after_signup(form)
if settings.ACCOUNT_APPROVAL_REQUIRED:
# Notify site admins about the user wanting activation
staff = auth.get_user_model().objects.filter(is_staff=True)
notification.send(staff, "account_approve", {"from_user": self.created_user})
return self.account_approval_required_response()
if settings.ACCOUNT_EMAIL_CONFIRMATION_EMAIL and not email_address.verified:
self.send_email_confirmation(email_address)
if settings.ACCOUNT_EMAIL_CONFIRMATION_REQUIRED and not email_address.verified:
return self.email_confirmation_required_response()
else:
show_message = [
settings.ACCOUNT_EMAIL_CONFIRMATION_EMAIL,
self.messages.get("email_confirmation_sent"),
not email_address.verified
]
if all(show_message):
messages.add_message(
self.request,
self.messages["email_confirmation_sent"]["level"],
self.messages["email_confirmation_sent"]["text"].format(**{
"email": form.cleaned_data["email"]
})
)
# attach form to self to maintain compatibility with login_user
# API. this should only be relied on by d-u-a and it is not a stable
# API for site developers.
self.form = form
# Use autologin only when the account is active.
if self.created_user.is_active:
self.login_user()
return redirect(self.get_success_url())
def get_success_url(self, fallback_url=None, **kwargs):
if fallback_url is None:
fallback_url = settings.ACCOUNT_SIGNUP_REDIRECT_URL
kwargs.setdefault("redirect_field_name", self.get_redirect_field_name())
return default_redirect(self.request, fallback_url, **kwargs)
def get_redirect_field_name(self):
return self.redirect_field_name
def create_user(self, form, commit=True, **kwargs):
user = get_user_model()(**kwargs)
code = form.cleaned_data['code']
user.username = form.cleaned_data["username"].strip()
user.email = form.cleaned_data["email"].strip()
password = form.cleaned_data.get("password")
if password:
user.set_password(password)
else:
user.set_unusable_password()
if commit:
user.save()
return user
def create_account(self, form):
return Account.create(request=self.request, user=self.created_user, create_email=False)
def generate_username(self, form):
raise NotImplementedError("Unable to generate username by default. "
"Override SignupView.generate_username in a subclass.")
def create_email_address(self, form, **kwargs):
kwargs.setdefault("primary", True)
kwargs.setdefault("verified", False)
if self.signup_code:
self.signup_code.use(self.created_user)
kwargs["verified"] = self.signup_code.email and self.created_user.email == self.signup_code.email
return EmailAddress.objects.add_email(self.created_user, self.created_user.email, **kwargs)
def send_email_confirmation(self, email_address):
email_address.send_confirmation(site=get_current_site(self.request))
def after_signup(self, form):
signals.user_signed_up.send(sender=SignupForm, user=self.created_user, form=form)
def login_user(self):
user = self.created_user
if settings.ACCOUNT_USE_AUTH_AUTHENTICATE:
# call auth.authenticate to ensure we set the correct backend for
# future look ups using auth.get_user().
user = auth.authenticate(**self.user_credentials())
else:
# set auth backend to ModelBackend, but this may not be used by
# everyone. this code path is deprecated and will be removed in
# favor of using auth.authenticate above.
user.backend = "django.contrib.auth.backends.ModelBackend"
auth.login(self.request, user)
self.request.session.set_expiry(0)
def user_credentials(self):
return hookset.get_user_credentials(self.form, self.identifier_field)
def is_open(self):
code = self.request.REQUEST.get("code")
if code:
try:
self.signup_code = SignupCode.check_code(code)
except SignupCode.InvalidCode:
if self.messages.get("invalid_signup_code"):
messages.add_message(
self.request,
self.messages["invalid_signup_code"]["level"],
self.messages["invalid_signup_code"]["text"].format(**{
"code": code
})
)
return settings.ACCOUNT_OPEN_SIGNUP
else:
return True
else:
return settings.ACCOUNT_OPEN_SIGNUP
def email_confirmation_required_response(self):
if self.request.is_ajax():
template_name = self.template_name_email_confirmation_sent_ajax
else:
template_name = self.template_name_email_confirmation_sent
response_kwargs = {
"request": self.request,
"template": template_name,
"context": {
"email": self.created_user.email,
"success_url": self.get_success_url(),
}
}
return self.response_class(**response_kwargs)
def account_approval_required_response(self):
if self.request.is_ajax():
            template_name = self.template_name_admin_approval_sent_ajax
else:
template_name = self.template_name_admin_approval_sent
response_kwargs = {
"request": self.request,
"template": template_name,
"context": {
"email": self.created_user.email,
"success_url": self.get_success_url(),
}
}
return self.response_class(**response_kwargs)
def closed(self):
if self.request.is_ajax():
template_name = self.template_name_signup_closed_ajax
else:
template_name = self.template_name_signup_closed
response_kwargs = {
"request": self.request,
"template": template_name,
}
return self.response_class(**response_kwargs)
| gpl-3.0 |
sanethd/device-cloud-appliances | DigitalDisplay/modules/colorstreamhandler.py | 5 | 3827 | #!/usr/bin/env python
# encoding: utf-8
import logging
# now we patch Python code to add color support to logging.StreamHandler
def add_coloring_to_emit_windows(fn):
# add methods we need to the class
def _out_handle(self):
import ctypes
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
out_handle = property(_out_handle)
def _set_color(self, code):
import ctypes
# Constants from the Windows API
self.STD_OUTPUT_HANDLE = -11
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
setattr(logging.StreamHandler, '_set_color', _set_color)
def new(*args):
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE|FOREGROUND_GREEN |FOREGROUND_RED
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
levelno = args[1].levelno
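        # Choose a console colour from the record's level:
        # >=50 CRITICAL, >=40 ERROR, >=30 WARNING, >=20 INFO, >=10 DEBUG.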
if(levelno>=50):
color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
elif(levelno>=40):
color = FOREGROUND_RED | FOREGROUND_INTENSITY
elif(levelno>=30):
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
elif(levelno>=20):
color = FOREGROUND_BLUE
elif(levelno>=10):
color = FOREGROUND_GREEN
else:
color = FOREGROUND_WHITE
args[0]._set_color(color)
ret = fn(*args)
args[0]._set_color( FOREGROUND_WHITE )
#print "after"
return ret
return new
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if(levelno>=50):
color = '\x1b[31m' # red
elif(levelno>=40):
color = '\x1b[31m' # red
elif(levelno>=30):
color = '\x1b[33m' # yellow
elif(levelno>=20):
color = '\x1b[34m' # blue
elif(levelno>=10):
color = '\x1b[32m' # green
else:
color = '\x1b[0m' # normal
args[1].msg = color + str(args[1].msg) + '\x1b[0m' # normal
#print "after"
return fn(*args)
return new
import platform
if platform.system()=='Windows':
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
else:
# all non-Windows platforms are supporting ANSI escapes so we use them
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
#log = logging.getLogger()
#log.addFilter(log_filter())
#//hdlr = logging.StreamHandler()
#//hdlr.setFormatter(formatter())
| apache-2.0 |
jswrenn/xtreemfs | cpp/thirdparty/protobuf-2.5.0/python/google/protobuf/internal/type_checkers.py | 527 | 12163 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
  corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = '[email protected] (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
"""Checker used for string fields."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (str, unicode)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (str, unicode)))
raise TypeError(message)
# If the value is of type 'str' make sure that it is in 7-bit ASCII
# encoding.
if isinstance(proposed_value, str):
try:
unicode(proposed_value, 'ascii')
except UnicodeDecodeError:
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
'encoding. Non-ASCII strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| bsd-3-clause |
ktritz/vispy | vispy/scene/cameras/turntable.py | 20 | 5029 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .perspective import Base3DRotationCamera
class TurntableCamera(Base3DRotationCamera):
""" 3D camera class that orbits around a center point while
maintaining a view on a center point.
For this camera, the ``scale_factor`` indicates the zoom level, and
the ``center`` indicates the position to put at the center of the
view.
Parameters
----------
fov : float
Field of view. Zero (default) means orthographic projection.
elevation : float
Elevation angle in degrees. Positive angles place the camera
        above the center point, negative angles place the camera below
the center point.
azimuth : float
Azimuth angle in degrees. Zero degrees places the camera on the
positive x-axis, pointing in the negative x direction.
roll : float
Roll angle in degrees
distance : float | None
The distance of the camera from the rotation point (only makes sense
if fov > 0). If None (default) the distance is determined from the
scale_factor and fov.
**kwargs : dict
Keyword arguments to pass to `BaseCamera`.
Notes
-----
Interaction:
* LMB: orbits the view around its center point.
* RMB or scroll: change scale_factor (i.e. zoom level)
* SHIFT + LMB: translate the center point
* SHIFT + RMB: change FOV
"""
_state_props = Base3DRotationCamera._state_props + ('elevation',
'azimuth', 'roll')
def __init__(self, fov=0.0, elevation=30.0, azimuth=30.0, roll=0.0,
distance=None, **kwargs):
super(TurntableCamera, self).__init__(fov=fov, **kwargs)
# Set camera attributes
self.azimuth = azimuth
self.elevation = elevation
self.roll = roll # interaction not implemented yet
self.distance = distance # None means auto-distance
@property
def elevation(self):
""" The angle of the camera in degrees above the horizontal (x, z)
plane.
"""
return self._elevation
@elevation.setter
def elevation(self, elev):
elev = float(elev)
self._elevation = min(90, max(-90, elev))
self.view_changed()
@property
def azimuth(self):
""" The angle of the camera in degrees around the y axis. An angle of
0 places the camera within the (y, z) plane.
"""
return self._azimuth
@azimuth.setter
def azimuth(self, azim):
azim = float(azim)
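        # Wrap the angle into the [-180, 180] range.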
while azim < -180:
azim += 360
while azim > 180:
azim -= 360
self._azimuth = azim
self.view_changed()
@property
def roll(self):
""" The angle of the camera in degrees around the z axis. An angle of
        0 puts the camera upright.
"""
return self._roll
@roll.setter
def roll(self, roll):
roll = float(roll)
while roll < -180:
roll += 360
while roll > 180:
roll -= 360
self._roll = roll
self.view_changed()
def orbit(self, azim, elev):
""" Orbits the camera around the center position.
Parameters
----------
azim : float
Angle in degrees to rotate horizontally around the center point.
elev : float
Angle in degrees to rotate vertically around the center point.
"""
self.azimuth += azim
self.elevation = np.clip(self.elevation + elev, -90, 90)
self.view_changed()
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = self.azimuth, self.elevation
self.azimuth = self._event_value[0] - (p2 - p1)[0] * 0.5
self.elevation = self._event_value[1] + (p2 - p1)[1] * 0.5
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
up, forward, right = self._get_dim_vectors()
self.transform.rotate(self.elevation, -right)
self.transform.rotate(self.azimuth, up)
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
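        # Compose roll, azimuth and elevation to map the 2D pan distance
        # (along the camera's right/up axes) onto world x/y/z translations.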
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz
| bsd-3-clause |
smunaut/gnuradio | gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/doxyindex.py | 223 | 6551 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files we want the contents to be accessible directly
# from the parent rather than having to go through the file
# object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
declname = property(lambda self: self.data()['declname'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
| gpl-3.0 |
hsaputra/tensorflow | tensorflow/python/kernel_tests/sparse_add_op_test.py | 80 | 9362 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseAddTest(test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
return _sparsify(x) if sparse else x
def _SparseTensorValue_3x3(self, negate=False):
# [ 1]
# [2 ]
# [3 4]
# ...or its cwise negation, if `negate`
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, 2, 3, 4])
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
# [-1.9 ]
# [ 3 -4.2]
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def testAddSelf(self):
with self.test_session(use_gpu=False) as sess:
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
self.assertAllEqual((3, 3), sp_sum.get_shape())
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testAddSelfAndNegation(self):
with self.test_session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
self.assertAllEqual(sum_out.values, [])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testSmallValuesShouldVanish(self):
with self.test_session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3_v2()
# sum:
# [ 2]
# [.1 ]
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
self.assertAllEqual(sum_out.values, [2, 6])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
# only .1 vanishes
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
self.assertAllClose(sum_out.values, [2, 6, -.2])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testGradients(self):
np.random.seed(1618) # Make it reproducible.
with self.test_session(use_gpu=False):
for n in [10, 31]:
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
nnz_sum = len(sp_sum.values.eval())
err = gradient_checker.compute_gradient_error(
[sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
(nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
for dtype in [np.float32, np.float64, np.int64, np.complex64]:
for index_dtype in [np.int32, np.int64]:
rand_vals_np = np.random.randn(n, m).astype(dtype)
dense_np = np.random.randn(n, m).astype(dtype)
with self.test_session(use_gpu=False):
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
s = sparse_ops.sparse_add(sparse,
constant_op.constant(dense_np)).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
# check commutativity
s = sparse_ops.sparse_add(constant_op.constant(dense_np),
sparse).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
def testSparseTensorDenseAddGradients(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
rand_vals_np = np.random.randn(n, m).astype(np.float32)
dense_np = np.random.randn(n, m).astype(np.float32)
with self.test_session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
dense = constant_op.constant(dense_np, dtype=dtypes.float32)
s = sparse_ops.sparse_add(sparse, dense)
err = gradient_checker.compute_gradient_error([sparse.values, dense],
[(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
def testInvalidSparseTensor(self):
with self.test_session(use_gpu=False) as sess:
shape = [2, 2]
val = [0]
dense = constant_op.constant(np.zeros(shape, dtype=np.int32))
for bad_idx in [
[[-1, 0]], # -1 is invalid.
[[1, 3]], # ...so is 3.
]:
sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
s = sparse_ops.sparse_add(sparse, dense)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"invalid index"):
sess.run(s)
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
with session.Session(graph=ops.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
s2d = math_ops.add(
sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))
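    # Warm up both ops (first runs pay one-off setup costs) before timing.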
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)
# per-iter latency; secs to millis
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
class SparseAddBenchmark(test.Benchmark):
def benchmarkSparseAddDense(self):
print("SparseAddDense: add with sparse_to_dense vs. sparse_add")
print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup")
for sparsity in [0.99, 0.5, 0.01]:
for n in [1, 256, 50000]:
for m in [100, 1000]:
s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m)
print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" % (sparsity, n, m,
s2d_dt, sa_dt,
s2d_dt / sa_dt))
if __name__ == "__main__":
test.main()
| apache-2.0 |
farodin91/servo | tests/wpt/update/tree.py | 167 | 6560 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import tempfile
from wptrunner import update as wptupdate
from wptrunner.update.tree import Commit, CommitMessage, get_unique_name
class HgTree(wptupdate.tree.HgTree):
def __init__(self, *args, **kwargs):
self.commit_cls = kwargs.pop("commit_cls", Commit)
wptupdate.tree.HgTree.__init__(self, *args, **kwargs)
# TODO: The extra methods for upstreaming patches from a
# hg checkout
class GitTree(wptupdate.tree.GitTree):
def __init__(self, *args, **kwargs):
"""Extension of the basic GitTree with extra methods for
transfering patches"""
commit_cls = kwargs.pop("commit_cls", Commit)
wptupdate.tree.GitTree.__init__(self, *args, **kwargs)
self.commit_cls = commit_cls
def create_branch(self, name, ref=None):
"""Create a named branch,
:param name: String representing the branch name.
:param ref: None to use current HEAD or rev that the branch should point to"""
args = []
if ref is not None:
if hasattr(ref, "sha1"):
ref = ref.sha1
args.append(ref)
self.git("branch", name, *args)
def commits_by_message(self, message, path=None):
"""List of commits with messages containing a given string.
:param message: The string that must be contained in the message.
:param path: Path to a file or directory the commit touches
"""
args = ["--pretty=format:%H", "--reverse", "-z", "--grep=%s" % message]
if path is not None:
args.append("--")
args.append(path)
data = self.git("log", *args)
return [self.commit_cls(self, sha1) for sha1 in data.split("\0")]
def log(self, base_commit=None, path=None):
"""List commits touching a certian path from a given base commit.
:base_param commit: Commit object for the base commit from which to log
:param path: Path that the commits must touch
"""
args = ["--pretty=format:%H", "--reverse", "-z"]
if base_commit is not None:
args.append("%s.." % base_commit.sha1)
if path is not None:
args.append("--")
args.append(path)
data = self.git("log", *args)
return [self.commit_cls(self, sha1) for sha1 in data.split("\0") if sha1]
def import_patch(self, patch):
"""Import a patch file into the tree and commit it
:param patch: a Patch object containing the patch to import
"""
with tempfile.NamedTemporaryFile() as f:
f.write(patch.diff)
f.flush()
f.seek(0)
self.git("apply", "--index", f.name)
self.git("commit", "-m", patch.message.text, "--author=%s" % patch.full_author)
def rebase(self, ref, continue_rebase=False):
"""Rebase the current branch onto another commit.
:param ref: A Commit object for the commit to rebase onto
:param continue_rebase: Continue an in-progress rebase"""
if continue_rebase:
args = ["--continue"]
else:
if hasattr(ref, "sha1"):
ref = ref.sha1
args = [ref]
self.git("rebase", *args)
def push(self, remote, local_ref, remote_ref, force=False):
"""Push local changes to a remote.
:param remote: URL of the remote to push to
:param local_ref: Local branch to push
:param remote_ref: Name of the remote branch to push to
:param force: Do a force push
"""
args = []
if force:
args.append("-f")
args.extend([remote, "%s:%s" % (local_ref, remote_ref)])
self.git("push", *args)
def unique_branch_name(self, prefix):
"""Get an unused branch name in the local tree
:param prefix: Prefix to use at the start of the branch name"""
branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
if ref.startswith("refs/heads/")]
return get_unique_name(branches, prefix)
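# Illustrative calls combining the helpers above (a sketch only; the branch and
# remote names are made up):
#   name = tree.unique_branch_name("upstream")
#   tree.create_branch(name)
#   tree.import_patch(patch)      # applies and commits a Patch onto the current branch
#   tree.push("origin", name, name)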
class Patch(object):
def __init__(self, author, email, message, diff):
self.author = author
self.email = email
if isinstance(message, CommitMessage):
self.message = message
else:
self.message = GeckoCommitMessage(message)
self.diff = diff
def __repr__(self):
return "<Patch (%s)>" % self.message.full_summary
@property
def full_author(self):
return "%s <%s>" % (self.author, self.email)
@property
def empty(self):
        return not bool(self.diff.strip())
class GeckoCommitMessage(CommitMessage):
"""Commit message following the Gecko conventions for identifying bug number
and reviewer"""
# c.f. http://hg.mozilla.org/hgcustom/version-control-tools/file/tip/hghooks/mozhghooks/commit-message.py
# which has the regexps that are actually enforced by the VCS hooks. These are
# slightly different because we need to parse out specific parts of the message rather
# than just enforce a general pattern.
_bug_re = re.compile("^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$",
re.IGNORECASE)
_backout_re = re.compile("^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))",
re.IGNORECASE)
_backout_sha1_re = re.compile("(?:\s|\:)(0-9a-f){12}")
def _parse_message(self):
CommitMessage._parse_message(self)
if self._backout_re.match(self.full_summary):
self.backouts = self._backout_re.findall(self.full_summary)
else:
self.backouts = []
m = self._bug_re.match(self.full_summary)
if m is not None:
self.bug, self.summary, self.reviewer = m.groups()
else:
self.bug, self.summary, self.reviewer = None, self.full_summary, None
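    # Worked example (hypothetical commit message): for a full_summary of
    # "Bug 1234567 - Fix the widget r=jdoe", _parse_message sets
    # bug='1234567', summary='Fix the widget' and reviewer='jdoe'; summaries
    # without a "Bug NNNNNN" prefix leave bug and reviewer as None.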
class GeckoCommit(Commit):
msg_cls = GeckoCommitMessage
def export_patch(self, path=None):
"""Convert a commit in the tree to a Patch with the bug number and
reviewer stripped from the message"""
args = ["--binary", self.sha1]
if path is not None:
args.append("--")
args.append(path)
diff = self.git("show", *args)
return Patch(self.author, self.email, self.message, diff)
| mpl-2.0 |
danielvdende/incubator-airflow | tests/ti_deps/deps/test_not_running_dep.py | 15 | 1506 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from mock import Mock
from airflow.ti_deps.deps.not_running_dep import NotRunningDep
from airflow.utils.state import State
class NotRunningDepTest(unittest.TestCase):
def test_ti_running(self):
"""
Running task instances should fail this dep
"""
ti = Mock(state=State.RUNNING, start_date=datetime(2016, 1, 1))
self.assertFalse(NotRunningDep().is_met(ti=ti))
def test_ti_not_running(self):
"""
Non-running task instances should pass this dep
"""
ti = Mock(state=State.NONE, start_date=datetime(2016, 1, 1))
self.assertTrue(NotRunningDep().is_met(ti=ti))
| apache-2.0 |
rakeshmi/tempest | tempest/api/object_storage/test_object_formpost_negative.py | 15 | 5117 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import time
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import test
class ObjectFormPostNegativeTest(base.BaseObjectTest):
metadata = {}
containers = []
@classmethod
def resource_setup(cls):
super(ObjectFormPostNegativeTest, cls).resource_setup()
cls.container_name = data_utils.rand_name(name='TestContainer')
cls.object_name = data_utils.rand_name(name='ObjectTemp')
cls.container_client.create_container(cls.container_name)
cls.containers = [cls.container_name]
cls.key = 'Meta'
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
def setUp(self):
super(ObjectFormPostNegativeTest, self).setUp()
# make sure the metadata has been set
account_client_metadata, _ = \
self.account_client.list_account_metadata()
self.assertIn('x-account-meta-temp-url-key',
account_client_metadata)
self.assertEqual(
account_client_metadata['x-account-meta-temp-url-key'],
self.key)
@classmethod
def resource_cleanup(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
cls.delete_containers(cls.containers)
super(ObjectFormPostNegativeTest, cls).resource_cleanup()
def get_multipart_form(self, expires=600):
path = "%s/%s/%s" % (
urlparse.urlparse(self.container_client.base_url).path,
self.container_name,
self.object_name)
redirect = ''
max_file_size = 104857600
max_file_count = 10
expires += int(time.time())
hmac_body = '%s\n%s\n%s\n%s\n%s' % (path,
redirect,
max_file_size,
max_file_count,
expires)
signature = hmac.new(self.key, hmac_body, hashlib.sha1).hexdigest()
fields = {'redirect': redirect,
'max_file_size': str(max_file_size),
'max_file_count': str(max_file_count),
'expires': str(expires),
'signature': signature}
boundary = '--boundary--'
data = []
for (key, value) in fields.items():
data.append('--' + boundary)
data.append('Content-Disposition: form-data; name="%s"' % key)
data.append('')
data.append(value)
data.append('--' + boundary)
data.append('Content-Disposition: form-data; '
'name="file1"; filename="testfile"')
data.append('Content-Type: application/octet-stream')
data.append('')
data.append('hello world')
data.append('--' + boundary + '--')
data.append('')
body = '\r\n'.join(data)
content_type = 'multipart/form-data; boundary=%s' % boundary
return body, content_type
@test.idempotent_id('d3fb3c4d-e627-48ce-9379-a1631f21336d')
@test.requires_ext(extension='formpost', service='object')
@test.attr(type=['negative'])
def test_post_object_using_form_expired(self):
body, content_type = self.get_multipart_form(expires=1)
time.sleep(2)
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
url = "%s/%s" % (self.container_name, self.object_name)
exc = self.assertRaises(
lib_exc.Unauthorized,
self.object_client.post,
url, body, headers=headers)
self.assertIn('FormPost: Form Expired', str(exc))
@test.idempotent_id('b277257f-113c-4499-b8d1-5fead79f7360')
@test.requires_ext(extension='formpost', service='object')
def test_post_object_using_form_invalid_signature(self):
self.key = "Wrong"
body, content_type = self.get_multipart_form()
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
url = "%s/%s" % (self.container_name, self.object_name)
exc = self.assertRaises(
lib_exc.Unauthorized,
self.object_client.post,
url, body, headers=headers)
self.assertIn('FormPost: Invalid Signature', str(exc))
| apache-2.0 |
40223136/20150616test1 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/color.py | 603 | 4330 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""Manipulate colors"""
try:
from colordict import THECOLORS
except ImportError:
#the colordict module isn't available
THECOLORS = {}
def Color(colorname):
"""pygame.color.Color(colorname) -> RGBA
Get RGB values from common color names
    The color name can be the name of a common English color,
    or a "web" style color in the form of 0xFF00FF. The English
color names are defined by the standard 'rgb' colors for X11.
With the hex color formatting you may optionally include an
alpha value, the formatting is 0xRRGGBBAA. You may also specify
a hex formatted color by starting the string with a '#'.
The color name used is case insensitive and whitespace is ignored.
"""
if colorname[:2] == '0x' or colorname[0] == '#': #webstyle
if colorname[0] == '#':
colorname = colorname[1:]
else:
colorname = colorname[2:]
a = 255
try:
r = int('0x' + colorname[0:2], 16)
g = int('0x' + colorname[2:4], 16)
b = int('0x' + colorname[4:6], 16)
if len(colorname) > 6:
a = int('0x' + colorname[6:8], 16)
except ValueError:
raise ValueError("Illegal hex color")
return r, g, b, a
else: #color name
#no spaces and lowercase
name = colorname.replace(' ', '').lower()
try:
return THECOLORS[name]
except KeyError:
raise ValueError("Illegal color name, " + name)
def _splitcolor(color, defaultalpha=255):
try:
second = int(color)
r = g = b = color
a = defaultalpha
except TypeError:
if len(color) == 4:
r, g, b, a = color
elif len(color) == 3:
r, g, b = color
a = defaultalpha
return r, g, b, a
def add(color1, color2):
"""pygame.color.add(color1, color2) -> RGBA
add two colors
Add the RGB values of two colors together. If one of the
colors is only a single numeric value, it is applied to the
RGB components of the first color. Color values will be clamped
to the maximum color value of 255.
"""
r1, g1, b1, a1 = _splitcolor(color1)
r2, g2, b2, a2 = _splitcolor(color2)
m, i = min, int
return m(i(r1+r2), 255), m(i(g1+g2), 255), m(i(b1+b2), 255), m(i(a1+a2), 255)
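# For instance, add((200, 100, 50), 100) treats the bare number as an offset on
# every RGB component and clamps at 255, giving (255, 200, 150, 255).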
def subtract(color1, color2):
"""pygame.color.subtract(color1, color2) -> RGBA
subtract two colors
    Subtract the RGB values of one color from another. If one of the
colors is only a single numeric value, it is applied to the
RGB components of the first color. Color values will be clamped
to the minimum color value of 0.
"""
r1, g1, b1, a1 = _splitcolor(color1)
r2, g2, b2, a2 = _splitcolor(color2, 0)
m, i = max, int
return m(i(r1-r2), 0), m(i(g1-g2), 0), m(i(b1-b2), 0), m(i(a1-a2), 0)
def multiply(color1, color2):
"""pygame.color.multiply(color1, color2) -> RGBA
multiply two colors
Multiply the RGB values of two colors together. If one of the
colors is only a single numeric value, it is applied to the
RGB components of the first color.
"""
r1, g1, b1, a1 = _splitcolor(color1)
r2, g2, b2, a2 = _splitcolor(color2)
m, i = min, int
    return m(i(r1*r2)//255, 255), m(i(g1*g2)//255, 255), m(i(b1*b2)//255, 255), m(i(a1*a2)//255, 255)
| gpl-3.0 |
DazWorrall/ansible | lib/ansible/modules/cloud/cloudstack/cs_account.py | 33 | 10806 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_account
short_description: Manages accounts on Apache CloudStack based clouds.
description:
- Create, disable, lock, enable and remove accounts.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of account.
required: true
username:
description:
- Username of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
password:
description:
- Password of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
first_name:
description:
- First name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
last_name:
description:
- Last name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
email:
description:
- Email of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
timezone:
description:
- Timezone of the user to be created if account did not exist.
required: false
default: null
network_domain:
description:
- Network domain of the account.
required: false
default: null
account_type:
description:
- Type of the account.
required: false
default: 'user'
choices: [ 'user', 'root_admin', 'domain_admin' ]
domain:
description:
- Domain the account is related to.
required: false
default: 'ROOT'
state:
description:
- State of the account.
- C(unlocked) is an alias for C(enabled).
required: false
default: 'present'
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
username: customer_xy
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
# Lock an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: locked
# Disable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: disabled
# Enable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: enabled
# Remove an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
id:
description: UUID of the account.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of the account.
returned: success
type: string
sample: [email protected]
account_type:
description: Type of the account.
returned: success
type: string
sample: user
state:
description: State of the account.
returned: success
type: string
sample: enabled
network_domain:
description: Network domain of the account.
returned: success
type: string
sample: example.local
domain:
  description: Domain the account is related to.
returned: success
type: string
sample: ROOT
'''
# import cloudstack common
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackAccount(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackAccount, self).__init__(module)
self.returns = {
'networkdomain': 'network_domain',
}
self.account = None
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_account(self):
if not self.account:
args = {
'listall': True,
'domainid': self.get_domain(key='id'),
}
accounts = self.query_api('listAccounts', **args)
if accounts:
account_name = self.module.params.get('name')
for a in accounts['account']:
if account_name == a['name']:
self.account = a
break
return self.account
def enable_account(self):
account = self.get_account()
if not account:
account = self.present_account()
if account['state'].lower() != 'enabled':
self.result['changed'] = True
args = {
'id': account['id'],
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id')
}
if not self.module.check_mode:
res = self.query_api('enableAccount', **args)
account = res['account']
return account
def lock_account(self):
return self.lock_or_disable_account(lock=True)
def disable_account(self):
return self.lock_or_disable_account()
def lock_or_disable_account(self, lock=False):
account = self.get_account()
if not account:
account = self.present_account()
# we need to enable the account to lock it.
if lock and account['state'].lower() == 'disabled':
account = self.enable_account()
if (lock and account['state'].lower() != 'locked' or
not lock and account['state'].lower() != 'disabled'):
self.result['changed'] = True
args = {
'id': account['id'],
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'lock': lock,
}
if not self.module.check_mode:
account = self.query_api('disableAccount', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
account = self.poll_job(account, 'account')
return account
def present_account(self):
required_params = [
'email',
'username',
'password',
'first_name',
'last_name',
]
self.module.fail_on_missing_params(required_params=required_params)
account = self.get_account()
if not account:
self.result['changed'] = True
args = {
'account': self.module.params.get('name'),
'domainid': self.get_domain(key='id'),
'accounttype': self.get_account_type(),
'networkdomain': self.module.params.get('network_domain'),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
'firstname': self.module.params.get('first_name'),
'lastname': self.module.params.get('last_name'),
'email': self.module.params.get('email'),
'timezone': self.module.params.get('timezone')
}
if not self.module.check_mode:
res = self.query_api('createAccount', **args)
account = res['account']
return account
def absent_account(self):
account = self.get_account()
if account:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('deleteAccount', id=account['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'account')
return account
def get_result(self, account):
super(AnsibleCloudStackAccount, self).get_result(account)
if account:
if 'accounttype' in account:
for key, value in self.account_types.items():
if value == account['accounttype']:
self.result['account_type'] = key
break
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
account_type=dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
network_domain=dict(),
domain=dict(default='ROOT'),
email=dict(),
first_name=dict(),
last_name=dict(),
username=dict(),
password=dict(no_log=True),
timezone=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_acc = AnsibleCloudStackAccount(module)
state = module.params.get('state')
if state in ['absent']:
account = acs_acc.absent_account()
elif state in ['enabled', 'unlocked']:
account = acs_acc.enable_account()
elif state in ['disabled']:
account = acs_acc.disable_account()
elif state in ['locked']:
account = acs_acc.lock_account()
else:
account = acs_acc.present_account()
result = acs_acc.get_result(account)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
KMK-ONLINE/ansible | lib/ansible/utils/module_docs.py | 20 | 5467 | #!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import ast
from ansible.parsing.yaml.loader import AnsibleLoader
import traceback
from collections import MutableMapping, MutableSet, MutableSequence
from ansible.plugins import fragment_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = frozenset((
'async_wrapper',
'accelerate',
'fireball',
))
def get_docstring(filename, verbose=False):
"""
Search for assignment of the DOCUMENTATION and EXAMPLES variables
in the given file.
Parse DOCUMENTATION from YAML and return the YAML doc or None
together with EXAMPLES, as plain text.
DOCUMENTATION can be extended using documentation fragments
loaded by the PluginLoader from the module_docs_fragments
directory.
"""
doc = None
plainexamples = None
returndocs = None
try:
# Thank you, Habbie, for this bit of code :-)
M = ast.parse(''.join(open(filename)))
for child in M.body:
if isinstance(child, ast.Assign):
for t in child.targets:
try:
theid = t.id
except AttributeError as e:
                        # skip this target; not every assignment target has an id (e.g. tuple unpacking)
display.warning("Failed to assign id for %s on %s, skipping" % (t, filename))
continue
if 'DOCUMENTATION' in theid:
doc = AnsibleLoader(child.value.s, file_name=filename).get_single_data()
fragments = doc.get('extends_documentation_fragment', [])
if isinstance(fragments, basestring):
fragments = [ fragments ]
# Allow the module to specify a var other than DOCUMENTATION
# to pull the fragment from, using dot notation as a separator
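                        # e.g. a slug like 'myfragment.extra' (hypothetical name)
                        # pulls the EXTRA variable from the 'myfragment' fragment
                        # class instead of its DOCUMENTATION variable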
for fragment_slug in fragments:
fragment_slug = fragment_slug.lower()
if '.' in fragment_slug:
fragment_name, fragment_var = fragment_slug.split('.', 1)
fragment_var = fragment_var.upper()
else:
fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
fragment_class = fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
if fragment.has_key('notes'):
notes = fragment.pop('notes')
if notes:
if not doc.has_key('notes'):
doc['notes'] = []
doc['notes'].extend(notes)
if 'options' not in fragment.keys():
raise Exception("missing options in fragment, possibly misformatted?")
for key, value in fragment.items():
if not doc.has_key(key):
doc[key] = value
else:
if isinstance(doc[key], MutableMapping):
doc[key].update(value)
elif isinstance(doc[key], MutableSet):
doc[key].add(value)
elif isinstance(doc[key], MutableSequence):
doc[key] = sorted(frozenset(doc[key] + value))
else:
raise Exception("Attempt to extend a documentation fragement of unknown type")
elif 'EXAMPLES' in theid:
plainexamples = child.value.s[1:] # Skip first empty line
elif 'RETURN' in theid:
returndocs = child.value.s[1:]
except:
display.error("unable to parse %s" % filename)
if verbose == True:
display.display("unable to parse %s" % filename)
raise
return doc, plainexamples, returndocs
| gpl-3.0 |
keen99/SickRage | lib/hachoir_core/field/link.py | 95 | 3176 | from hachoir_core.field import Field, FieldSet, ParserError, Bytes, MissingField
from hachoir_core.stream import FragmentedStream
class Link(Field):
def __init__(self, parent, name, *args, **kw):
Field.__init__(self, parent, name, 0, *args, **kw)
def hasValue(self):
return True
def createValue(self):
return self._parent[self.display]
def createDisplay(self):
value = self.value
if value is None:
return "<%s>" % MissingField.__name__
return value.path
def _getField(self, name, const):
target = self.value
assert self != target
return target._getField(name, const)
class Fragments:
def __init__(self, first):
self.first = first
def __iter__(self):
fragment = self.first
while fragment is not None:
data = fragment.getData()
yield data and data.size
fragment = fragment.next
class Fragment(FieldSet):
_first = None
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._field_generator = self._createFields(self._field_generator)
if self.__class__.createFields == Fragment.createFields:
self._getData = lambda: self
def getData(self):
try:
return self._getData()
except MissingField, e:
self.error(str(e))
return None
def setLinks(self, first, next=None):
self._first = first or self
self._next = next
self._feedLinks = lambda: self
return self
def _feedLinks(self):
while self._first is None and self.readMoreFields(1):
pass
if self._first is None:
raise ParserError("first is None")
return self
first = property(lambda self: self._feedLinks()._first)
def _getNext(self):
next = self._feedLinks()._next
if callable(next):
self._next = next = next()
return next
next = property(_getNext)
def _createInputStream(self, **args):
first = self.first
if first is self and hasattr(first, "_getData"):
return FragmentedStream(first, packets=Fragments(first), **args)
return FieldSet._createInputStream(self, **args)
def _createFields(self, field_generator):
if self._first is None:
for field in field_generator:
if self._first is not None:
break
yield field
else:
raise ParserError("Fragment.setLinks not called")
else:
field = None
if self._first is not self:
link = Link(self, "first", None)
link._getValue = lambda: self._first
yield link
if self._next:
link = Link(self, "next", None)
link.createValue = self._getNext
yield link
if field:
yield field
for field in field_generator:
yield field
def createFields(self):
if self._size is None:
self._size = self._getSize()
yield Bytes(self, "data", self._size/8)
| gpl-3.0 |
jamesbulpin/xcp-xen-4.1 | tools/python/logging/logging-0.4.9.2/test/log_test12.py | 42 | 1951 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests HTTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import sys, string, logging, logging.handlers
def main():
import pdb
host = "localhost:%d" % logging.handlers.DEFAULT_HTTP_LOGGING_PORT
gh = logging.handlers.HTTPHandler(host, '/log', 'GET')
ph = logging.handlers.HTTPHandler(host, '/log', 'POST')
logger = logging.getLogger("log_test12")
logger.propagate = 0
logger.addHandler(gh)
logger.addHandler(ph)
logging.getLogger("").setLevel(logging.DEBUG)
logger.info("Jackdaws love my big %s of %s", "sphinx", "quartz")
logger.debug("Pack my %s with twelve dozen %s", "box", "liquor jugs")
gh.close()
ph.close()
logger.removeHandler(gh)
logger.removeHandler(ph)
if __name__ == "__main__":
main()
| gpl-2.0 |
40423109/2017springcd_hw | plugin/summary/summary.py | 317 | 2852 | """
Summary
-------
This plugin allows easy, variable length summaries directly embedded into the
body of your articles.
"""
from __future__ import unicode_literals
from pelican import signals
from pelican.generators import ArticlesGenerator, StaticGenerator, PagesGenerator
def initialized(pelican):
from pelican.settings import DEFAULT_CONFIG
DEFAULT_CONFIG.setdefault('SUMMARY_BEGIN_MARKER',
'<!-- PELICAN_BEGIN_SUMMARY -->')
DEFAULT_CONFIG.setdefault('SUMMARY_END_MARKER',
'<!-- PELICAN_END_SUMMARY -->')
if pelican:
pelican.settings.setdefault('SUMMARY_BEGIN_MARKER',
'<!-- PELICAN_BEGIN_SUMMARY -->')
pelican.settings.setdefault('SUMMARY_END_MARKER',
'<!-- PELICAN_END_SUMMARY -->')
def extract_summary(instance):
# if summary is already specified, use it
# if there is no content, there's nothing to do
if hasattr(instance, '_summary'):
instance.has_summary = True
return
if not instance._content:
instance.has_summary = False
return
begin_marker = instance.settings['SUMMARY_BEGIN_MARKER']
end_marker = instance.settings['SUMMARY_END_MARKER']
content = instance._content
begin_summary = -1
end_summary = -1
if begin_marker:
begin_summary = content.find(begin_marker)
if end_marker:
end_summary = content.find(end_marker)
if begin_summary == -1 and end_summary == -1:
instance.has_summary = False
return
# skip over the begin marker, if present
if begin_summary == -1:
begin_summary = 0
else:
begin_summary = begin_summary + len(begin_marker)
if end_summary == -1:
end_summary = None
summary = content[begin_summary:end_summary]
# remove the markers from the content
if begin_summary:
content = content.replace(begin_marker, '', 1)
if end_summary:
content = content.replace(end_marker, '', 1)
instance._content = content
instance._summary = summary
instance.has_summary = True
def run_plugin(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in generator.articles:
extract_summary(article)
elif isinstance(generator, PagesGenerator):
for page in generator.pages:
extract_summary(page)
def register():
signals.initialized.connect(initialized)
try:
signals.all_generators_finalized.connect(run_plugin)
except AttributeError:
# NOTE: This results in #314 so shouldn't really be relied on
# https://github.com/getpelican/pelican-plugins/issues/314
signals.content_object_init.connect(extract_summary)
| agpl-3.0 |
dsemi/Flexget | flexget/webserver.py | 2 | 7364 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import hashlib
import logging
import random
import socket
import threading
import cherrypy
import safe
from flask import Flask, abort, redirect
from flask_login import UserMixin
from sqlalchemy import Column, Integer, Unicode
from werkzeug.security import generate_password_hash
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import Base
from flexget.utils.database import with_session
from flexget.utils.tools import singleton
log = logging.getLogger('web_server')
_home = None
_app_register = {}
_default_app = Flask(__name__)
random = random.SystemRandom()
web_config_schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'bind': {'type': 'string', 'format': 'ipv4', 'default': '0.0.0.0'},
'port': {'type': 'integer', 'default': 3539},
},
'additionalProperties': False
}
]
}
def generate_key():
""" Generate key for use to authentication """
return str(hashlib.sha224(str(random.getrandbits(128)).encode('utf-8')).hexdigest())
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits.
Taken from the django.utils.crypto module.
"""
return ''.join(random.choice(allowed_chars) for __ in range(length))
@with_session
def get_secret(session=None):
""" Generate a secret key for flask applications and store it in the database. """
web_secret = session.query(WebSecret).first()
if not web_secret:
web_secret = WebSecret(id=1, value=get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'))
session.add(web_secret)
session.commit()
return web_secret.value
class WeakPassword(Exception):
def __init__(self, value, logger=log, **kwargs):
super(WeakPassword, self).__init__()
# Value is expected to be a string
if not isinstance(value, str):
value = str(value)
self.value = value
self.log = logger
self.kwargs = kwargs
def __str__(self):
        return str(self.value)
def __unicode__(self):
return str(self.value)
class User(Base, UserMixin):
""" User class available for flask apps to handle authentication using flask_login """
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(Unicode(50), unique=True)
token = Column(Unicode, default=generate_key)
password = Column(Unicode)
def __repr__(self):
return '<User %r>' % self.name
def get_id(self):
return self.name
class WebSecret(Base):
""" Store flask secret in the database """
__tablename__ = 'secret'
id = Column(Unicode, primary_key=True)
value = Column(Unicode)
@event('config.register')
def register_config():
register_config_key('web_server', web_config_schema)
def register_app(path, application):
if path in _app_register:
        raise ValueError('path %s already registered' % path)
_app_register[path] = application
def register_home(route):
"""Registers UI home page"""
global _home
_home = route
@_default_app.route('/')
def start_page():
""" Redirect user to registered UI home """
if not _home:
abort(404)
return redirect(_home)
@event('manager.daemon.started', -255) # Low priority so plugins can register apps
@with_session
def setup_server(manager, session=None):
""" Sets up and starts/restarts the web service. """
if not manager.is_daemon:
return
web_server_config = manager.config.get('web_server')
if not web_server_config:
return
web_server = WebServer(
bind=web_server_config['bind'],
port=web_server_config['port'],
)
_default_app.secret_key = get_secret()
user = get_user()
if not user or not user.password:
log.warning('No password set for web server, create one by using'
' `flexget web passwd <password>`')
if web_server.is_alive():
web_server.stop()
if _app_register:
web_server.start()
@event('manager.shutdown')
def stop_server(manager):
""" Sets up and starts/restarts the webui. """
if not manager.is_daemon:
return
web_server = WebServer()
if web_server.is_alive():
web_server.stop()
@singleton
class WebServer(threading.Thread):
# We use a regular list for periodic jobs, so you must hold this lock while using it
triggers_lock = threading.Lock()
def __init__(self, bind='0.0.0.0', port=5050):
threading.Thread.__init__(self, name='web_server')
self.bind = str(bind) # String to remove unicode warning from cherrypy startup
self.port = port
def start(self):
# If we have already started and stopped a thread, we need to reinitialize it to create a new one
if not self.is_alive():
self.__init__(bind=self.bind, port=self.port)
threading.Thread.start(self)
def _start_server(self):
# Mount the WSGI callable object (app) on the root directory
cherrypy.tree.graft(_default_app, '/')
for path, registered_app in _app_register.items():
cherrypy.tree.graft(registered_app, path)
cherrypy.log.error_log.propagate = False
cherrypy.log.access_log.propagate = False
# Set the configuration of the web server
cherrypy.config.update({
'engine.autoreload.on': False,
'server.socket_port': self.port,
'server.socket_host': self.bind,
'log.screen': False,
})
try:
host = self.bind if self.bind != "0.0.0.0" else socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = '127.0.0.1'
log.info('Web interface available at http://%s:%s' % (host, self.port))
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
def run(self):
self._start_server()
def stop(self):
log.info('Shutting down web server')
cherrypy.engine.exit()
@with_session
def get_user(username='flexget', session=None):
user = session.query(User).filter(User.name == username).first()
if not user:
user = User()
user.name = username
session.add(user)
return user
@with_session
def change_password(username='flexget', password='', session=None):
check = safe.check(password)
if check.strength not in ['medium', 'strong']:
raise WeakPassword('Password {0} is not strong enough'.format(password))
user = get_user(username=username, session=session)
user.password = str(generate_password_hash(password))
session.commit()
@with_session
def generate_token(username='flexget', session=None):
user = get_user(username=username, session=session)
user.token = generate_key()
session.commit()
return user.token
| mit |
martinspeleo/dicomlayers | src/dicom/test/test_filewriter.py | 4 | 8121 | # test_filewriter.py
"""unittest cases for dicom.filewriter module"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import sys
import os.path
import os
import unittest
from dicom.filereader import read_file
from dicom.filewriter import write_data_element
from dicom.tag import Tag
from dicom.dataset import Dataset, FileDataset
from dicom.sequence import Sequence
from dicom.util.hexutil import hex2bytes, bytes2hex
# from io import BytesIO
from dicom.filebase import DicomBytesIO
from dicom.dataelem import DataElement
from dicom.util.hexutil import hex2bytes, bytes2hex
from pkg_resources import Requirement, resource_filename
test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/testfiles")
testcharset_dir = resource_filename(Requirement.parse("pydicom"),
"dicom/testcharsetfiles")
rtplan_name = os.path.join(test_dir, "rtplan.dcm")
rtdose_name = os.path.join(test_dir, "rtdose.dcm")
ct_name = os.path.join(test_dir, "CT_small.dcm")
mr_name = os.path.join(test_dir, "MR_small.dcm")
jpeg_name = os.path.join(test_dir, "JPEG2000.dcm")
unicode_name = os.path.join(testcharset_dir, "chrH31.dcm")
multiPN_name = os.path.join(testcharset_dir, "chrFrenMulti.dcm")
# Set up rtplan_out, rtdose_out etc. Filenames as above, with '2' appended
for inname in ['rtplan', 'rtdose', 'ct', 'mr', 'jpeg', 'unicode', 'multiPN']:
exec(inname + "_out = " + inname + "_name + '2'")
def files_identical(a, b):
"""Return a tuple (file a == file b, index of first difference)"""
a_bytes = open(a, "rb").read()
b_bytes = open(b, "rb").read()
return bytes_identical(a_bytes, b_bytes)
def bytes_identical(a_bytes, b_bytes):
"""Return a tuple (bytes a == bytes b, index of first difference)"""
if a_bytes == b_bytes:
return True, 0 # True, dummy argument
else:
pos = 0
while a_bytes[pos] == b_bytes[pos]:
pos += 1
return False, pos # False if not identical, position of 1st diff
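# e.g. bytes_identical(b"abcd", b"abXd") == (False, 2): the streams agree up to
# (but not including) offset 2, the reported position of the first difference.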
class WriteFileTests(unittest.TestCase):
def compare(self, in_filename, out_filename, decode=False):
"""Read file1, write file2, then compare.
Return value as for files_identical.
"""
dataset = read_file(in_filename)
if decode:
dataset.decode()
dataset.save_as(out_filename)
same, pos = files_identical(in_filename, out_filename)
self.assertTrue(same,
"Files are not identical - first difference at 0x%x" % pos)
if os.path.exists(out_filename):
os.remove(out_filename) # get rid of the file
def testRTPlan(self):
"""Input file, write back and verify them identical (RT Plan file)"""
self.compare(rtplan_name, rtplan_out)
def testRTDose(self):
"""Input file, write back and verify them identical (RT Dose file)"""
self.compare(rtdose_name, rtdose_out)
def testCT(self):
"""Input file, write back and verify them identical (CT file)....."""
self.compare(ct_name, ct_out)
def testMR(self):
"""Input file, write back and verify them identical (MR file)....."""
self.compare(mr_name, mr_out)
def testUnicode(self):
"""Ensure decoded string DataElements are written to file properly"""
self.compare(unicode_name, unicode_out, decode=True)
def testMultiPN(self):
"""Ensure multiple Person Names are written to the file correctly."""
self.compare(multiPN_name, multiPN_out, decode=True)
def testJPEG2000(self):
"""Input file, write back and verify them identical (JPEG2K file)."""
self.compare(jpeg_name, jpeg_out)
def testListItemWriteBack(self):
"""Change item in a list and confirm it is written to file .."""
DS_expected = 0
CS_expected = "new"
SS_expected = 999
ds = read_file(ct_name)
ds.ImagePositionPatient[2] = DS_expected
ds.ImageType[1] = CS_expected
ds[(0x0043, 0x1012)].value[0] = SS_expected
ds.save_as(ct_out)
# Now read it back in and check that the values were changed
ds = read_file(ct_out)
self.assertTrue(ds.ImageType[1] == CS_expected,
"Item in a list not written correctly to file (VR=CS)")
self.assertTrue(ds[0x00431012].value[0] == SS_expected,
"Item in a list not written correctly to file (VR=SS)")
self.assertTrue(ds.ImagePositionPatient[2] == DS_expected,
"Item in a list not written correctly to file (VR=DS)")
if os.path.exists(ct_out):
os.remove(ct_out)
class WriteDataElementTests(unittest.TestCase):
"""Attempt to write data elements has the expected behaviour"""
def setUp(self):
# Create a dummy (in memory) file to write to
self.f1 = DicomBytesIO()
self.f1.is_little_endian = True
self.f1.is_implicit_VR = True
def test_empty_AT(self):
"""Write empty AT correctly.........."""
# Was issue 74
data_elem = DataElement(0x00280009, "AT", [])
expected = hex2bytes((
" 28 00 09 00" # (0028,0009) Frame Increment Pointer
" 00 00 00 00" # length 0
))
write_data_element(self.f1, data_elem)
got = self.f1.parent.getvalue()
msg = ("Did not write zero-length AT value correctly. "
"Expected %r, got %r") % (bytes2hex(expected), bytes2hex(got))
msg = "%r %r" % (type(expected), type(got))
msg = "'%r' '%r'" % (expected, got)
self.assertEqual(expected, got, msg)
class ScratchWriteTests(unittest.TestCase):
"""Simple dataset from scratch, written in all endian/VR combinations"""
def setUp(self):
# Create simple dataset for all tests
ds = Dataset()
ds.PatientName = "Name^Patient"
# Set up a simple nested sequence
# first, the innermost sequence
subitem1 = Dataset()
subitem1.ContourNumber = 1
subitem1.ContourData = ['2', '4', '8', '16']
subitem2 = Dataset()
subitem2.ContourNumber = 2
subitem2.ContourData = ['32', '64', '128', '196']
sub_ds = Dataset()
sub_ds.ContourSequence = Sequence((subitem1, subitem2))
# Now the top-level sequence
ds.ROIContourSequence = Sequence((sub_ds,)) # Comma to make one-tuple
# Store so each test can use it
self.ds = ds
def compare_write(self, hex_std, file_ds):
"""Write file and compare with expected byte string
:arg hex_std: the bytes which should be written, as space separated hex
:arg file_ds: a FileDataset instance containing the dataset to write
"""
out_filename = "scratch.dcm"
file_ds.save_as(out_filename)
std = hex2bytes(hex_std)
bytes_written = open(out_filename, 'rb').read()
# print "std :", bytes2hex(std)
# print "written:", bytes2hex(bytes_written)
same, pos = bytes_identical(std, bytes_written)
self.assertTrue(same,
"Writing from scratch unexpected result - 1st diff at 0x%x" % pos)
if os.path.exists(out_filename):
os.remove(out_filename) # get rid of the file
def testImpl_LE_deflen_write(self):
"""Scratch Write for implicit VR little endian, defined length SQ's"""
from dicom.test._write_stds import impl_LE_deflen_std_hex as std
file_ds = FileDataset("test", self.ds)
self.compare_write(std, file_ds)
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
# If not run from the directory where the sample images are,
# then need to switch there
dir_name = os.path.dirname(sys.argv[0])
save_dir = os.getcwd()
if dir_name:
os.chdir(dir_name)
os.chdir("../testfiles")
unittest.main()
os.chdir(save_dir)
| gpl-2.0 |
mwiebe/numpy | numpy/linalg/__init__.py | 84 | 2343 | """
Core Linear Algebra Tools
=========================
=============== ==========================================================
Linear algebra basics
==========================================================================
norm Vector or matrix norm
inv Inverse of a square matrix
solve Solve a linear system of equations
det Determinant of a square matrix
slogdet Logarithm of the determinant of a square matrix
lstsq Solve linear least-squares problem
pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
matrix_power Integer power of a square matrix
matrix_rank Calculate matrix rank using an SVD-based method
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig Eigenvalues and vectors of a square matrix
eigh Eigenvalues and eigenvectors of a Hermitian matrix
eigvals Eigenvalues of a square matrix
eigvalsh Eigenvalues of a Hermitian matrix
qr QR decomposition of a matrix
svd Singular value decomposition of a matrix
cholesky Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve Solve a linear tensor equation
tensorinv Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
from .info import __doc__
from .linalg import *
from numpy.testing.nosetester import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
| bsd-3-clause |
gregorschatz/pymodbus3 | examples/functional/base_runner.py | 3 | 3135 | import os
import time
from subprocess import Popen as execute
from twisted.internet.defer import Deferred
#---------------------------------------------------------------------------#
# configure the client logging
#---------------------------------------------------------------------------#
import logging
log = logging.getLogger(__name__)
class Runner(object):
'''
This is the base runner class for all the integration tests
'''
def initialize(self, service):
''' Initializes the test environment '''
self.fnull = open(os.devnull, 'w')
self.server = execute(service, stdout=self.fnull, stderr=self.fnull)
log.debug("%s service started: %s", service, self.server.pid)
time.sleep(0.2)
def shutdown(self):
''' Cleans up the test environment '''
self.server.kill()
self.fnull.close()
log.debug("service stopped")
def testReadWriteCoil(self):
rq = self.client.write_coil(1, True)
rr = self.client.read_coils(1,1)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.bits[0] == True)
def testReadWriteCoils(self):
rq = self.client.write_coils(1, [True]*8)
rr = self.client.read_coils(1,8)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.bits == [True]*8)
def testReadWriteDiscreteRegisters(self):
rq = self.client.write_coils(1, [False]*8)
rr = self.client.read_discrete_inputs(1,8)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.bits == [False]*8)
def testReadWriteHoldingRegisters(self):
rq = self.client.write_register(1, 10)
rr = self.client.read_holding_registers(1,1)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.registers[0] == 10)
def testReadWriteInputRegisters(self):
rq = self.client.write_registers(1, [10]*8)
rr = self.client.read_input_registers(1,8)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.registers == [10]*8)
def testReadWriteRegistersTogether(self):
arguments = {
'read_address': 1,
'read_count': 8,
'write_address': 1,
'write_registers': [20]*8,
}
rq = self.client.readwrite_registers(**arguments)
rr = self.client.read_input_registers(1,8)
self.__validate(rq, lambda r: r.function_code < 0x80)
self.__validate(rr, lambda r: r.registers == [20]*8)
def __validate(self, result, test):
        ''' Validate the result, whether it is a plain response or a Deferred.
        :param result: The result to validate
        :param test: The predicate used to validate the result
        '''
        if isinstance(result, Deferred):
            result.addCallback(lambda value: self.assertTrue(test(value)))
            result.addErrback(lambda _: self.assertTrue(False))
        else:
            self.assertTrue(test(result))
| bsd-3-clause |
cchurch/ansible | lib/ansible/modules/network/nxos/nxos_bgp_neighbor.py | 5 | 17494 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bgp_neighbor
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP neighbors configurations.
description:
- Manages BGP neighbors configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the whole BGP neighbor configuration.
- Default, where supported, restores params default value.
options:
asn:
description:
- BGP autonomous system number. Valid values are string,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
default: default
neighbor:
description:
- Neighbor Identifier. Valid values are string. Neighbors may use
IPv4 or IPv6 notation, with or without prefix length.
required: true
description:
description:
- Description of the neighbor.
bfd:
description:
- Enables/Disables BFD for a given neighbor.
- "Dependency: 'feature bfd'"
version_added: "2.9"
type: str
choices: ['enable', 'disable']
connected_check:
description:
- Configure whether or not to check for directly connected peer.
type: bool
capability_negotiation:
description:
- Configure whether or not to negotiate capability with
this neighbor.
type: bool
dynamic_capability:
description:
- Configure whether or not to enable dynamic capability.
type: bool
ebgp_multihop:
description:
- Specify multihop TTL for a remote peer. Valid values are
integers between 2 and 255, or keyword 'default' to disable
this property.
local_as:
description:
- Specify the local-as number for the eBGP neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
log_neighbor_changes:
description:
- Specify whether or not to enable log messages for neighbor
up/down event.
choices: ['enable', 'disable', 'inherit']
low_memory_exempt:
description:
- Specify whether or not to shut down this neighbor under
memory pressure.
type: bool
maximum_peers:
description:
- Specify Maximum number of peers for this neighbor prefix
Valid values are between 1 and 1000, or 'default', which does
not impose the limit. Note that this parameter is accepted
only on neighbors with address/prefix.
pwd:
description:
- Specify the password for neighbor. Valid value is string.
pwd_type:
description:
- Specify the encryption type the password will use. Valid values
are '3des' or 'cisco_type_7' encryption or keyword 'default'.
choices: ['3des', 'cisco_type_7', 'default']
remote_as:
description:
- Specify Autonomous System Number of the neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
remove_private_as:
description:
- Specify the config to remove private AS number from outbound
updates. Valid values are 'enable' to enable this config,
'disable' to disable this config, 'all' to remove all
private AS number, or 'replace-as', to replace the private
AS number.
choices: ['enable', 'disable', 'all', 'replace-as']
shutdown:
description:
- Configure to administratively shutdown this neighbor.
type: bool
suppress_4_byte_as:
description:
- Configure to suppress 4-byte AS Capability.
type: bool
timers_keepalive:
description:
- Specify keepalive timer value. Valid values are integers
between 0 and 3600 in terms of seconds, or 'default',
which is 60.
timers_holdtime:
description:
- Specify holdtime timer value. Valid values are integers between
0 and 3600 in terms of seconds, or 'default', which is 180.
transport_passive_only:
description:
- Specify whether or not to only allow passive connection setup.
Valid values are 'true', 'false', and 'default', which defaults
to 'false'. This property can only be configured when the
neighbor is in 'ip' address format without prefix length.
type: bool
update_source:
description:
- Specify source interface of BGP session and updates.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# create a new neighbor
- nxos_bgp_neighbor:
asn: 65535
neighbor: 192.0.2.3
local_as: 20
remote_as: 30
bfd: enable
description: "just a description"
update_source: Ethernet1/3
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "neighbor 192.0.2.3",
"remote-as 30", "update-source Ethernet1/3",
"description just a description", "local-as 20"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'capability_negotiation',
'shutdown',
'connected_check',
'dynamic_capability',
'low_memory_exempt',
'suppress_4_byte_as',
'transport_passive_only',
]
PARAM_TO_COMMAND_KEYMAP = {
'asn': 'router bgp',
'bfd': 'bfd',
'capability_negotiation': 'dont-capability-negotiate',
'connected_check': 'disable-connected-check',
'description': 'description',
'dynamic_capability': 'dynamic-capability',
'ebgp_multihop': 'ebgp-multihop',
'local_as': 'local-as',
'log_neighbor_changes': 'log-neighbor-changes',
'low_memory_exempt': 'low-memory exempt',
'maximum_peers': 'maximum-peers',
'neighbor': 'neighbor',
'pwd': 'password',
'pwd_type': 'password',
'remote_as': 'remote-as',
'remove_private_as': 'remove-private-as',
'shutdown': 'shutdown',
'suppress_4_byte_as': 'capability suppress 4-byte-as',
'timers_keepalive': 'timers',
'timers_holdtime': 'timers',
'transport_passive_only': 'transport connection-mode passive',
'update_source': 'update-source',
'vrf': 'vrf'
}
PARAM_TO_DEFAULT_KEYMAP = {
'bfd': 'disable',
'shutdown': False,
'dynamic_capability': True,
'timers_keepalive': 60,
'timers_holdtime': 180
}
def get_value(arg, config):
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.search(r'^\s+{0}$'.format(command), config, re.M)
has_command_val = re.search(r'(?:\s+{0}\s*)(?P<value>.*)$'.format(command), config, re.M)
if arg == 'dynamic_capability':
has_no_command = re.search(r'\s+no\s{0}\s*$'.format(command), config, re.M)
value = True
if has_no_command:
value = False
elif arg in BOOL_PARAMS:
value = False
if has_command:
value = True
elif arg == 'log_neighbor_changes':
value = ''
if has_command:
value = 'enable'
elif has_command_val:
value = 'disable'
elif arg == 'remove_private_as':
value = 'disable'
if has_command:
value = 'enable'
elif has_command_val:
value = has_command_val.group('value')
elif arg == 'bfd':
value = 'enable' if has_command else 'disable'
else:
value = ''
if has_command_val:
value = has_command_val.group('value')
if command in ['timers', 'password']:
split_value = value.split()
value = ''
if arg in ['timers_keepalive', 'pwd_type']:
value = split_value[0]
elif arg in ['timers_holdtime', 'pwd'] and len(split_value) == 2:
value = split_value[1]
return value
def get_existing(module, args, warnings):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
asn_regex = re.compile(r'.*router\sbgp\s(?P<existing_asn>\d+(\.\d+)?).*', re.S)
match_asn = asn_regex.match(str(netcfg))
if match_asn:
existing_asn = match_asn.group('existing_asn')
parents = ["router bgp {0}".format(existing_asn)]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg not in ['asn', 'vrf', 'neighbor']:
existing[arg] = get_value(arg, config)
existing['asn'] = existing_asn
existing['neighbor'] = module.params['neighbor']
existing['vrf'] = module.params['vrf']
else:
warnings.append("The BGP process didn't exist but the task"
" just created it.")
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'log-neighbor-changes':
if value == 'enable':
commands.append('{0}'.format(key))
elif value == 'disable':
commands.append('{0} {1}'.format(key, value))
elif value == 'inherit':
if existing_commands.get(key):
commands.append('no {0}'.format(key))
elif key == 'password':
pwd_type = module.params['pwd_type']
if pwd_type == '3des':
pwd_type = 3
else:
pwd_type = 7
command = '{0} {1} {2}'.format(key, pwd_type, value)
if command not in commands:
commands.append(command)
elif key == 'remove-private-as':
if value == 'enable':
command = '{0}'.format(key)
commands.append(command)
elif value == 'disable':
if existing_commands.get(key) != 'disable':
command = 'no {0}'.format(key)
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
elif key == 'timers':
if (proposed['timers_keepalive'] != PARAM_TO_DEFAULT_KEYMAP.get('timers_keepalive') or
proposed['timers_holdtime'] != PARAM_TO_DEFAULT_KEYMAP.get('timers_holdtime')):
command = 'timers {0} {1}'.format(
proposed['timers_keepalive'],
proposed['timers_holdtime'])
if command not in commands:
commands.append(command)
elif key == 'bfd':
no_cmd = 'no ' if value == 'disable' else ''
commands.append(no_cmd + key)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['router bgp {0}'.format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
# make sure that local-as is the last command in the list.
local_as_command = 'local-as {0}'.format(module.params['local_as'])
if local_as_command in commands:
commands.remove(local_as_command)
commands.append(local_as_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
commands.append('no neighbor {0}'.format(module.params['neighbor']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
neighbor=dict(required=True, type='str'),
description=dict(required=False, type='str'),
bfd=dict(required=False, type='str', choices=['enable', 'disable']),
capability_negotiation=dict(required=False, type='bool'),
connected_check=dict(required=False, type='bool'),
dynamic_capability=dict(required=False, type='bool'),
ebgp_multihop=dict(required=False, type='str'),
local_as=dict(required=False, type='str'),
log_neighbor_changes=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
low_memory_exempt=dict(required=False, type='bool'),
maximum_peers=dict(required=False, type='str'),
pwd=dict(required=False, type='str'),
pwd_type=dict(required=False, type='str', choices=['3des', 'cisco_type_7', 'default']),
remote_as=dict(required=False, type='str'),
remove_private_as=dict(required=False, type='str', choices=['enable', 'disable', 'all', 'replace-as']),
shutdown=dict(required=False, type='bool'),
suppress_4_byte_as=dict(required=False, type='bool'),
timers_keepalive=dict(required=False, type='str'),
timers_holdtime=dict(required=False, type='str'),
transport_passive_only=dict(required=False, type='bool'),
update_source=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present', required=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=[['timers_holdtime', 'timers_keepalive'], ['pwd', 'pwd_type']],
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
if module.params['pwd_type'] == 'default':
module.params['pwd_type'] = '0'
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args, warnings)
if existing.get('asn') and state == 'present':
if existing['asn'] != module.params['asn']:
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key not in ['asn', 'vrf', 'neighbor', 'pwd_type']:
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key, 'default')
if key == 'bfd':
if existing.get('bfd', 'disable') != value:
proposed[key] = value
elif existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
sparkslabs/kamaelia_ | Code/Python/Kamaelia/Tools/DocGen/TestSuiteRun.py | 9 | 10078 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
====================
Test suite outputter
====================
A command line tool that runs a set of unit tests (built on the python unittest
framework) and separates the results into individual text files.
The resulting test output is suitable for inclusion by the DocExtractor.py
documentation generator.
* Recurses over a directory tree (the test suite) containing unit test code.
* Outputs individual text files containing successes and failures
* Can run tests against an alternate codebase (instead of whatever python
modules are installed)
Usage
-----
To run the axon test suite you might use a command line like this::
$> ./TestSuiteRun.py testSuiteDir --outdir ./testoutput --root Axon --codebase ./trunk/Code/Python/Axon
The specified codebase is pre-pended to the PYTHONPATH environment variable -
causing python to look there before looking in its installed packages. This
enables you to run tests against modules that aren't installed.
Directory structure of the test suite
-------------------------------------
The directory and filenaming structure should mirror that of the code being
tested if you want the output from running this tool to be usable for
documentation generation. That way the filenames can be easily matched up
so the documentation generator knows what tests go with what modules it is
documenting.
For example, for a source code base like this::
Axon/
Microprocess.py
Component.py
Support/
Misc.py
The corresponding tests should be in the same directory structure with matching
test_XXX.py format filenames::
Axon/
test_Microprocess.py
test_Component.py
Support/
test_Misc.py
Format of tests
---------------
Tests should be written using the python unittest framework and should execute
when the source file containing them is run.
A test file might typically look like this::
import unittest
class MyTest(unittest.TestCase):
....
if __name__=="__main__":
unittest.main()
In particular when supplied with the ``-v`` command line option, the output
they produce should be in the same format as python unittest output.
Format of the output
--------------------
Suppose the test suite consists of the following directory structure, and the
``--root`` is set to "A.B"::
testSuiteDir/
not_a_test.py
test_Foo.py
test_Bar.py
subdir/
test_Bling.py
subdir2/
test_Burble.py
Then the outputted files will be::
testoutput/
A.B.test_Foo...ok
A.B.test_Foo...fail
A.B.test_Foo...msgs
A.B.test_Bar...ok
A.B.test_Bar...fail
A.B.test_Bar...msgs
A.B.subdir.test_Bling...ok
A.B.subdir.test_Bling...fail
A.B.subdir.test_Bling...msgs
A.B.subdir2.test_Burble...ok
A.B.subdir2.test_Burble...fail
A.B.subdir2.test_Burble...msgs
As you can see, the filenames mimick the directory structure. Only files with
a name matching the pattern "test_XXX.py" are run. Anything else is considered
to not be a test and is ignored.
For each test source file, three files are output:
* ``XXX...ok`` - description of each test that passed
* ``XXX...fail`` - description of each test that failed
* ``XXX...msgs`` - any other messages output during the test being run
(eg. reasons why particular tests failed)
"""
import re
import os
import sys
def writeOut(filename,data):
"""Write data to the named file"""
F=open(filename,"w")
F.write(data)
F.close()
def processDirectory(suiteDir, outFilePath, filePattern):
"""\
Recurse through test suite directory running any python files matching the
specified filename pattern (a compiled regular expression) and collecting
the output and splitting it into separate output text files.
"""
dirEntries = os.listdir(suiteDir)
for filename in dirEntries:
filepath = os.path.join(suiteDir, filename)
if os.path.isdir(filepath):
processDirectory(filepath, outFilePath+"."+filename, filePattern)
else:
match = filePattern.match(filename)
if match:
nameFragment = match.group(1)
outname = outFilePath+"."+nameFragment
print "Running: "+filepath+" ..."
print
inpipe, outpipe = os.popen4(filepath+" -v")
lines = outpipe.readlines()
inpipe.close()
outpipe.close()
output, failures, msgs = parseLines(lines)
writeOut(outname+"...ok", "".join(output))
writeOut(outname+"...fail", "".join(failures))
writeOut(outname+"...msgs", "".join(msgs))
pattern_ok = re.compile("^(.*) \.\.\. ok\n$")
pattern_fail = re.compile("^(.*) \.\.\. FAIL\n$")
def parseLines(lines):
"""\
Parse lines of output from a unittest run, separating them into
passes, failures and messages
"""
passes = []
failures = []
msgs = []
state="LINES"
for line in lines:
print line,
if state=="LINES":
if pattern_ok.match(line):
msg = pattern_ok.match(line).group(1)
passes.append(msg+"\n")
elif pattern_fail.match(line):
msg = pattern_fail.match(line).group(1)
failures.append(msg+"\n")
else:
state="ERROR REPORTS"
if state=="ERROR REPORTS":
if re.match("Ran \d+ tests? in \d*(\.\d+)?s\n$",line):
state="DONE"
else:
msgs.append(line)
return passes,failures,msgs
if __name__ == "__main__":
testSuiteDir = None
testOutputDir = None
    moduleRoot = None
    codeBaseDir = None   # default when --codebase is not supplied (avoids a NameError later)
filePattern = re.compile("^test_([^\.]*)\.py$")
cmdLineArgs = []
for arg in sys.argv[1:]:
if arg[:2] == "--" and len(arg)>2:
cmdLineArgs.append(arg.lower())
else:
cmdLineArgs.append(arg)
if not cmdLineArgs or "--help" in cmdLineArgs or "-h" in cmdLineArgs:
sys.stderr.write("\n".join([
"Usage:",
"",
" "+sys.argv[0]+" <arguments - see below>",
"",
"Optional arguments:",
"",
" --help Display this help message",
"",
" --codebase <dir> The directory containing the codebase - will be",
" pre-pended to python's module path. Default is nothing.",
"",
" --root <moduleRoot> The module path leading up to the repositoryDir specified",
" eg. Axon, if testSuiteDir='.../Tests/Python/Axon/'",
" Default is the leaf directory name of the <testSuiteDir>",
"",
"Mandatory arguments:",
"",
" --outdir <dir> Directory to put output into (default is 'pydoc')",
" directory must already exist (and be emptied)",
"",
" <testSuiteDir> Use Kamaelia modules here instead of the installed ones",
"",
"",
]))
sys.exit(0)
try:
if "--outdir" in cmdLineArgs:
index = cmdLineArgs.index("--outdir")
testOutputDir = cmdLineArgs[index+1]
del cmdLineArgs[index+1]
del cmdLineArgs[index]
if "--root" in cmdLineArgs:
index = cmdLineArgs.index("--root")
moduleRoot = cmdLineArgs[index+1]
del cmdLineArgs[index+1]
del cmdLineArgs[index]
if "--codebase" in cmdLineArgs:
index = cmdLineArgs.index("--codebase")
codeBaseDir = cmdLineArgs[index+1]
del cmdLineArgs[index+1]
del cmdLineArgs[index]
if len(cmdLineArgs)==1:
testSuiteDir = cmdLineArgs[0]
elif len(cmdLineArgs)==0:
testSuiteDir = None
else:
raise
except:
sys.stderr.write("\n".join([
"Error in command line arguments.",
"Run with '--help' for info on command line arguments.",
"",
"",
]))
sys.exit(1)
sys.argv=sys.argv[0:0]
assert(testSuiteDir)
assert(testOutputDir)
if moduleRoot is None:
# if no module root specified, strip down the test suite dir for the leaf directory name
moduleRoot = os.path.abspath(testSuiteDir)
moduleRoot = os.path.split(moduleRoot)[1]
assert(moduleRoot)
if codeBaseDir is not None:
# if codebase is specified, set the pythonpath variable so it will
# be found by subsequent python apps we run
os.putenv("PYTHONPATH",codeBaseDir)
outDir = os.path.join(testOutputDir,moduleRoot) # ensure its already got the suffix
processDirectory(testSuiteDir,outDir,filePattern)
| apache-2.0 |
colinnewell/odoo | openerp/report/render/simple.py | 324 | 3152 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
devs4v/devs4v-information-retrieval15 | project/venv/lib/python2.7/site-packages/django/conf/locale/it/formats.py | 115 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 10/2006
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
lpsinger/astropy | astropy/utils/console.py | 2 | 35859 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""
import codecs
import locale
import re
import math
import multiprocessing
import os
import struct
import sys
import threading
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
try:
import fcntl
import termios
import signal
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
from astropy import conf
from .misc import isiterable
from .decorators import classproperty
__all__ = [
'isatty', 'color_print', 'human_time', 'human_file_size',
'ProgressBar', 'Spinner', 'print_code_line', 'ProgressBarOrSpinner',
'terminal_size']
_DEFAULT_ENCODING = 'utf-8'
class _IPython:
"""Singleton class given access to IPython streams, etc."""
@classproperty
def get_ipython(cls):
try:
from IPython import get_ipython
except ImportError:
pass
return get_ipython
@classproperty
def OutStream(cls):
if not hasattr(cls, '_OutStream'):
cls._OutStream = None
try:
cls.get_ipython()
except NameError:
return None
try:
from ipykernel.iostream import OutStream
except ImportError:
try:
from IPython.zmq.iostream import OutStream
except ImportError:
from IPython import version_info
if version_info[0] >= 4:
return None
try:
from IPython.kernel.zmq.iostream import OutStream
except ImportError:
return None
cls._OutStream = OutStream
return cls._OutStream
@classproperty
def ipyio(cls):
if not hasattr(cls, '_ipyio'):
try:
from IPython.utils import io
except ImportError:
cls._ipyio = None
else:
cls._ipyio = io
return cls._ipyio
@classmethod
def get_stream(cls, stream):
return getattr(cls.ipyio, stream)
def _get_stdout(stderr=False):
"""
This utility function contains the logic to determine what streams to use
by default for standard out/err.
Typically this will just return `sys.stdout`, but it contains additional
logic for use in IPython on Windows to determine the correct stream to use
(usually ``IPython.util.io.stdout`` but only if sys.stdout is a TTY).
"""
if stderr:
stream = 'stderr'
else:
stream = 'stdout'
sys_stream = getattr(sys, stream)
return sys_stream
def isatty(file):
"""
Returns `True` if ``file`` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
"""
if (multiprocessing.current_process().name != 'MainProcess' or
threading.current_thread().getName() != 'MainThread'):
return False
if hasattr(file, 'isatty'):
return file.isatty()
if _IPython.OutStream is None or (not isinstance(file, _IPython.OutStream)):
return False
# File is an IPython OutStream. Check whether:
# - File name is 'stdout'; or
# - File wraps a Console
if getattr(file, 'name', None) == 'stdout':
return True
if hasattr(file, 'stream'):
        # FIXME: pyreadline has had no new release since 2015, drop it when
# IPython minversion is 5.x.
# On Windows, in IPython 2 the standard I/O streams will wrap
# pyreadline.Console objects if pyreadline is available; this should
# be considered a TTY.
try:
from pyreadline.console import Console as PyreadlineConsole
except ImportError:
return False
return isinstance(file.stream, PyreadlineConsole)
return False
def terminal_size(file=None):
"""
Returns a tuple (height, width) containing the height and width of
the terminal.
    This function will look for the width and height in multiple places
before falling back on the width and height in astropy's
configuration.
"""
if file is None:
file = _get_stdout()
try:
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack("HHHH", x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
if lines <= 0 or width <= 0:
raise Exception('unable to get terminal size')
return (lines, width)
except Exception:
try:
# see if POSIX standard variables will work
return (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = conf.max_lines
width = conf.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
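# A hedged usage sketch (illustrative): returns (lines, columns); (25, 80) is
# the documented fallback when no other source of terminal size is available.
#   nlines, ncols = terminal_size()   # e.g. (25, 80)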
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the
text in a terminal::
colored_text = color_text('Here is a message', 'blue')
    This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
'black': '0;30',
'red': '0;31',
'green': '0;32',
'brown': '0;33',
'blue': '0;34',
'magenta': '0;35',
'cyan': '0;36',
'lightgrey': '0;37',
'default': '0;39',
'darkgrey': '1;30',
'lightred': '1;31',
'lightgreen': '1;32',
'yellow': '1;33',
'lightblue': '1;34',
'lightmagenta': '1;35',
'lightcyan': '1;36',
'white': '1;37'}
if sys.platform == 'win32' and _IPython.OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, '0;39')
return f'\033[{color_code}m{text}\033[0m'
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode('latin-1')
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'.
"""
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter('latin-1')
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, end='\n', **kwargs):
"""
Prints colors and styles to the terminal uses ANSI escape
sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get('file', _get_stdout())
write = file.write
if isatty(file) and conf.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ''
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
write(msg)
write(end)
def strip_ansi_codes(s):
"""
Remove ANSI color codes from the string.
"""
return re.sub('\033\\[([0-9]+)(;[0-9]+)*m', '', s)
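# A hedged usage sketch (illustrative):
#   strip_ansi_codes('\033[0;31mwarning\033[0m')  -> 'warning'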
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
"""
units = [
('y', 60 * 60 * 24 * 7 * 52),
('w', 60 * 60 * 24 * 7),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1),
]
seconds = int(seconds)
if seconds < 60:
return f' {seconds:2d}s'
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return '{:2d}{}{:2d}{}'.format(
seconds // limit1, unit1,
(seconds % limit1) // limit2, unit2)
return ' ~inf'
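# A hedged usage sketch (illustrative values; output is always 6 characters wide):
#   human_time(3700)          -> ' 1h 1m'
#   human_time(60 * 60 * 25)  -> ' 1d 1h'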
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
"""
if hasattr(size, 'unit'):
# Import units only if necessary because the import takes a
# significant time [#4649]
from astropy import units as u
size = u.Quantity(size, u.byte).value
suffixes = ' kMGTPEZY'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = '?'
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if suffix == ' ':
str_value = str_value[:str_value.index('.')]
elif str_value[2] == '.':
str_value = str_value[:2]
else:
str_value = str_value[:3]
return f"{str_value:>3s}{suffix}"
class _mapfunc(object):
"""
A function wrapper to support ProgressBar.map().
"""
def __init__(self, func):
self._func = func
def __call__(self, i_arg):
i, arg = i_arg
return i, self._func(arg)
class ProgressBar:
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if file is None:
file = _get_stdout()
if not ipython_widget and not isatty(file):
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = (
_CAN_RESIZE_TERMINAL and self._file.isatty())
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write('\n')
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write('\r|')
color_print('=' * bar_fill, 'blue', file=file, end='')
if bar_fill < self._bar_length:
color_print('>', 'green', file=file, end='')
write('-' * (self._bar_length - bar_fill - 1))
write('|')
if value >= self._total:
t = time.time() - self._start_time
prefix = ' '
elif value <= 0:
t = None
prefix = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = ' ETA '
write(f' {human_file_size(value):>4s}/{self._human_total:>4s}')
write(f' ({frac:>6.2%})')
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
# Create and display an empty progress bar widget,
# if none exists.
if not hasattr(self, '_widget'):
# Import only if an IPython widget, i.e., widget in iPython NB
from IPython import version_info
if version_info[0] < 4:
from IPython.html import widgets
self._widget = widgets.FloatProgressWidget()
else:
_IPython.get_ipython()
from ipywidgets import widgets
self._widget = widgets.FloatProgress()
from IPython.display import display
display(self._widget)
self._widget.value = 0
# Calculate percent completion, and update progress bar
frac = (value/self._total)
self._widget.value = frac * 100
self._widget.description = f' ({frac:>6.2%})'
def _silent_update(self, value=None):
pass
@classmethod
def map(cls, function, items, multiprocess=False, file=None, step=100,
ipython_widget=False, multiprocessing_start_method=None):
"""Map function over items while displaying a progress bar with percentage complete.
The map operation may run in arbitrary order on the items, but the results are
returned in sequential order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
if multiprocess:
function = _mapfunc(function)
items = list(enumerate(items))
results = cls.map_unordered(
function, items, multiprocess=multiprocess,
file=file, step=step,
ipython_widget=ipython_widget,
multiprocessing_start_method=multiprocessing_start_method)
if multiprocess:
_, results = zip(*sorted(results))
results = list(results)
return results
@classmethod
def map_unordered(cls, function, items, multiprocess=False, file=None,
step=100, ipython_widget=False,
multiprocessing_start_method=None):
"""Map function over items, reporting the progress.
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run on arbitrary order
on the items, and the results may be returned in arbitrary order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
results = []
if file is None:
file = _get_stdout()
with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
if bar._ipython_widget:
chunksize = step
else:
default_step = max(int(float(len(items)) / bar._bar_length), 1)
chunksize = min(default_step, step)
if not multiprocess or multiprocess < 1:
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
ctx = multiprocessing.get_context(multiprocessing_start_method)
if sys.version_info >= (3, 7):
kwargs = dict(mp_context=ctx)
else:
kwargs = {}
with ProcessPoolExecutor(
max_workers=(int(multiprocess)
if multiprocess is not True
else None),
**kwargs) as p:
for i, f in enumerate(
as_completed(
p.submit(function, item)
for item in items)):
bar.update(i)
results.append(f.result())
return results
class Spinner:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.next()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color='default', file=None, step=1,
chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : writable file-like, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = _get_stdout()
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
while True:
write('\r')
color_print(self._msg, self._color, file=file, end='')
write(' ')
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for i in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
if self._silent:
return self._silent_iterator()
else:
return self._iterator()
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write('\r')
color_print(self._msg, self._color, file=file, end='')
if exc_type is None:
color_print(' [Done]', 'green', file=file)
else:
color_print(' [Failed]', 'red', file=file)
flush()
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end='')
self._file.flush()
while True:
yield
class ProgressBarOrSpinner:
"""
A class that displays either a `ProgressBar` or `Spinner`
depending on whether the total size of the operation is
known or not.
It is designed to be used with the ``with`` statement::
if file.has_length():
length = file.get_length()
else:
length = None
bytes_read = 0
with ProgressBarOrSpinner(length) as bar:
while file.read(blocksize):
bytes_read += blocksize
bar.update(bytes_read)
"""
def __init__(self, total, msg, color='default', file=None):
"""
Parameters
----------
total : int or None
If an int, the number of increments in the process being
tracked and a `ProgressBar` is displayed. If `None`, a
`Spinner` is displayed.
msg : str
The message to display above the `ProgressBar` or
alongside the `Spinner`.
color : str, optional
The color of ``msg``, if any. Must be an ANSI terminal
color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey,
lightred, lightgreen, yellow, lightblue, lightmagenta,
lightcyan, white.
file : writable file-like, optional
The file to write the to. Defaults to `sys.stdout`. If
``file`` is not a tty (as determined by calling its `isatty`
member, if any), only ``msg`` will be displayed: the
`ProgressBar` or `Spinner` will be silent.
"""
if file is None:
file = _get_stdout()
if total is None or not isatty(file):
self._is_spinner = True
self._obj = Spinner(msg, color=color, file=file)
else:
self._is_spinner = False
color_print(msg, color, file=file)
self._obj = ProgressBar(total, file=file)
def __enter__(self):
self._iter = self._obj.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._obj.__exit__(exc_type, exc_value, traceback)
def update(self, value):
"""
Update the progress bar to the given value (out of the total
        given to the constructor).
"""
if self._is_spinner:
next(self._iter)
else:
self._obj.update(value)
def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
"""
Prints a line of source code, highlighting a particular character
position in the line. Useful for displaying the context of error
messages.
If the line is more than ``width`` characters, the line is truncated
accordingly and '…' characters are inserted at the front and/or
end.
It looks like this::
there_is_a_syntax_error_here :
^
Parameters
----------
line : unicode
The line of code to display
col : int, optional
The character in the line to highlight. ``col`` must be less
than ``len(line)``.
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`.
tabwidth : int, optional
The number of spaces per tab (``'\\t'``) character. Default
is 8. All tabs will be converted to spaces to ensure that the
caret lines up with the correct column.
width : int, optional
The width of the display, beyond which the line will be
truncated. Defaults to 70 (this matches the default in the
standard library's `textwrap` module).
"""
if file is None:
file = _get_stdout()
if conf.unicode_output:
ellipsis = '…'
else:
ellipsis = '...'
write = file.write
if col is not None:
if col >= len(line):
            raise ValueError('col must be less than the line length.')
ntabs = line[:col].count('\t')
col += ntabs * (tabwidth - 1)
line = line.rstrip('\n')
line = line.replace('\t', ' ' * tabwidth)
if col is not None and col > width:
new_col = min(width // 2, len(line) - col)
offset = col - new_col
line = line[offset + len(ellipsis):]
width -= len(ellipsis)
new_col = col
col -= offset
color_print(ellipsis, 'darkgrey', file=file, end='')
if len(line) > width:
write(line[:width - len(ellipsis)])
color_print(ellipsis, 'darkgrey', file=file)
else:
write(line)
write('\n')
if col is not None:
write(' ' * col)
color_print('^', 'red', file=file)
# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, linux, MacOSX. This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#
class Getch:
"""Get a single character from standard input without screen echo.
Returns
-------
char : str (one character)
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except (ImportError, AttributeError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
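# A hedged usage sketch (illustrative only): blocks until a single key is
# pressed and returns it without echoing it to the screen.
#   getch = Getch()
#   key = getch()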
class _GetchUnix:
def __init__(self):
import tty # pylint: disable=W0611
import sys # pylint: disable=W0611
# import termios now or else you'll get the Unix
# version on the Mac
import termios # pylint: disable=W0611
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt # pylint: disable=W0611
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
| bsd-3-clause |
whitepyro/debian_server_setup | lib/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| gpl-3.0 |
artmusic0/theano-learning.part02 | fixed_official_convolutional_v3(pickle_out_best_W&B)/code/DBN.py | 31 | 17088 | """
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from rbm import RBM
# start-snippet-1
class DBN(object):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for classification, the DBN is treated as a MLP, by adding a logistic
regression layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=10):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
"""
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector
# of [int] labels
# end-snippet-1
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
# weights of the MLP as well) During finetuning we will finish
# training the DBN by doing stochastic gradient descent on the
# MLP.
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the
# hidden layer below or the input of the DBN if you are on
# the first layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question... but we are
# going to only declare that the parameters of the
# sigmoid_layers are parameters of the DBN. The visible
# biases in the RBM are parameters of those RBMs, but not
# of the DBN.
self.params.extend(sigmoid_layer.params)
# Construct an RBM that shared weights with this layer
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
self.rbm_layers.append(rbm_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.logLayer.params)
# compute the cost for second phase of training, defined as the
# negative log likelihood of the logistic regression (output) layer
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size, k):
'''Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the RBM
:type batch_size: int
:param batch_size: size of a [mini]batch
:param k: number of Gibbs steps to do in CD-k / PCD-k
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# begining of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
# using CD-k here (persisent=None) for training each RBM.
# TODO: change cost function to reconstruction error
cost, updates = rbm.get_cost_updates(learning_rate,
persistent=None, k=k)
# compile the theano function
fn = theano.function(
inputs=[index, theano.Param(learning_rate, default=0.1)],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin:batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on a
batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * learning_rate))
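        # i.e. plain (non-momentum) stochastic gradient descent:
        #     param_new = param - learning_rate * d(finetune_cost)/d(param)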
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in xrange(n_test_batches)]
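        # Calling convention of the three return values (as used by test_DBN):
        #   train_fn(minibatch_index) -> finetune cost on that minibatch,
        #                                applying one SGD update as a side effect
        #   valid_score()             -> list of per-minibatch validation errors
        #   test_score()              -> list of per-minibatch test errors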
return train_fn, valid_score, test_score
def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
pretrain_lr=0.01, k=1, training_epochs=1000,
dataset='mnist.pkl.gz', batch_size=10):
"""
Demonstrates how to train and test a Deep Belief Network.
This is demonstrated on MNIST.
:type finetune_lr: float
:param finetune_lr: learning rate used in the finetune stage
:type pretraining_epochs: int
    :param pretraining_epochs: number of epochs to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type k: int
:param k: number of Gibbs steps in CD/PCD
:type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer
:type dataset: string
    :param dataset: path to the pickled dataset
:type batch_size: int
:param batch_size: the size of a minibatch
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
print '... building the model'
# construct the Deep Belief Network
dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,
hidden_layers_sizes=[1000, 1000, 1000],
n_outs=10)
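    # i.e. a 784-1000-1000-1000-10 network: 28 * 28 = 784 input pixels,
    # three hidden layers of 1000 units each, and 10 output classes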
# start-snippet-2
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size,
k=k)
print '... pre-training the model'
start_time = timeit.default_timer()
## Pre-train layer-wise
for i in xrange(dbn.n_layers):
# go through pretraining epochs
for epoch in xrange(pretraining_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
lr=pretrain_lr))
print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print numpy.mean(c)
end_time = timeit.default_timer()
# end-snippet-2
print >> sys.stderr, ('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = dbn.build_finetune_functions(
datasets=datasets,
batch_size=batch_size,
learning_rate=finetune_lr
)
print '... finetuning the model'
# early-stopping parameters
    patience = 4 * n_train_batches  # look at this many examples regardless
patience_increase = 2. # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
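    # Patience is counted in minibatch updates (`iter` below): every time the
    # validation error improves on the best-so-far by more than the
    # improvement_threshold factor, patience is pushed out to
    # iter * patience_increase, so the loop keeps going while meaningful
    # improvements arrive and stops once patience <= iter.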
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_fn(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%'
% (
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%, '
'obtained at iteration %i, '
'with test performance %f %%'
) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
)
print >> sys.stderr, ('The fine tuning code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time)
/ 60.))
if __name__ == '__main__':
test_DBN()
| gpl-3.0 |
josepht/snapcraft | integration_tests/test_after.py | 2 | 3073 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from testtools.matchers import Contains
import integration_tests
class AfterTestCase(integration_tests.TestCase):
def test_stage_dependencies(self):
self.run_snapcraft('stage', 'dependencies')
self.assertTrue(
os.access(
os.path.join(self.stage_dir, 'bin', 'p3'),
os.X_OK))
def test_build_with_circular_dependencies(self):
self.copy_project_to_cwd('dependencies')
with open('snapcraft.yaml', 'r') as snapcraft_yaml:
snapcraft_yaml_contents = snapcraft_yaml.read()
with open('snapcraft.yaml', 'w') as snapcraft_yaml:
snapcraft_yaml.write(
snapcraft_yaml_contents.replace(
'p1:',
'p1:\n'
' after: [p3]'))
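        # The 'dependencies' example project presumably already builds p3
        # (transitively) after p1, so telling p1 to build after p3 closes a
        # dependency cycle, which the build step below is expected to reject.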
# We update here to get a clean log/stdout later
self.run_snapcraft('update')
exception = self.assertRaises(
subprocess.CalledProcessError,
self.run_snapcraft, 'build')
self.assertEqual(1, exception.returncode)
expected = (
'Issue detected while analyzing snapcraft.yaml: '
'circular dependency chain found in parts definition\n')
self.assertThat(exception.output, Contains(expected))
def test_build_with_missing_dependencies(self):
self.copy_project_to_cwd('dependencies')
with open('snapcraft.yaml', 'r') as snapcraft_yaml:
snapcraft_yaml_contents = snapcraft_yaml.read()
wrong_contents = snapcraft_yaml_contents.replace(
' after: [p1]\n',
'')
wrong_contents = wrong_contents.replace(
' after: [p2]\n',
'')
with open('snapcraft.yaml', 'w') as snapcraft_yaml:
snapcraft_yaml.write(wrong_contents)
exception = self.assertRaises(
subprocess.CalledProcessError,
self.run_snapcraft, 'build')
self.assertEqual(1, exception.returncode)
def test_pull_with_tree_of_dependencies(self):
self.run_snapcraft(
'pull', os.path.join('circular-dependencies', 'tree'))
def test_pull_with_circular_dependencies(self):
self.assertRaises(
subprocess.CalledProcessError,
self.run_snapcraft, 'pull',
os.path.join('circular-dependencies', 'circle'))
| gpl-3.0 |