repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
progamer001/Testchat | auth.py | 1 | 9013 | # coding=UTF-8
# Tornado modules.
import tornado.web
import tornado.auth
import tornado.escape
# Import application modules.
from base import BaseHandler
# General modules.
import logging
class LoginHandler(BaseHandler, tornado.auth.GoogleMixin):
"""
Handler for logins with Google Open ID / OAuth
http://www.tornadoweb.org/documentation/auth.html#google
"""
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
elif self.get_argument("start_google_oauth", None):
            # Set user attributes to ask for.
ax_attrs = ['name', 'email', 'language', 'username']
self.authenticate_redirect(ax_attrs=ax_attrs)
elif self.get_argument("start_direct_auth", None):
# Get form inputs.
try:
user = dict()
user["name"] = self.get_argument("name", default="")
user["pass_login"] = self.get_argument("pass_login", default="")
user["password"] = ""
            except Exception:
                # Send an error back to the client and stop processing.
                content = "<p>There was an input error. Fill in all fields!</p>"
                self.render_default("index.html", content=content)
                return
# If user has not filled in all fields.
if not user["pass_login"] or not user["name"]:
content = ('<h2>2. Direct Login</h2>'
+ '<p>Fill in both fields!</p>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name" value="' + str(user["name"]) + '"> '
+ '<input class="form-control" type="password" name="pass_login" placeholder="Your Password" value="' + str(user["pass_login"]) + '"> '
+ '<input type="submit" class="btn btn-default" value="Sign in">'
+ '</form>')
self.render_default("index.html", content=content)
# All data given. Log user in!
else:
self._on_auth(user)
elif self.get_argument("start_registration", None):
# Get form inputs.
try:
user = dict()
user["name"] = self.get_argument("name", default="")
user["password"] = self.get_argument("password", default="")
user["pass_login"] = ""
            except Exception:
                # Send an error back to the client and stop processing.
                content = "<p>There was an input error. Fill in all fields!</p>"
                self.render_default("index.html", content=content)
                return
# If user has not filled in all fields.
if not user["password"] or not user["name"]:
content = ('<h2>3. Registration</h2>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_registration" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="password" name="password" placeholder="Your Password"> '
+ '<input type="submit" class="btn btn-default" value="Register">'
+ '</form>')
self.render_default("index.html", content=content)
# All data given. Log user in!
else:
self._on_auth(user)
else:
# Logins.
content = '<div class="page-header"><h1>Login</h1></div>'
content += ('<h2>1. Google Login</h2>'
+ '<form action="/login" method="get">'
+ '<input type="hidden" name="start_google_oauth" value="1">'
+ '<input type="submit" class="btn" value="Sign in with Google">'
+ '</form>')
content += ('<h2>2. Direct Login</h2>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="password" name="pass_login" placeholder="Your Password"> '
+ '<input type="submit" class="btn btn-default" value="Sign in">'
+ '</form>')
content += ('<h2>3. Registration</h2>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_registration" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="password" name="password" placeholder="Your Password"> '
+ '<input type="submit" class="btn btn-default" value="Register">'
+ '</form>')
self.render_default("index.html", content=content)
def _on_auth(self, user):
"""
Callback for third party authentication (last step).
"""
if not user:
content = ('<div class="page-header"><h1>Login</h1></div>'
+ '<div class="alert alert-error">'
+ '<button class="close" data-dismiss="alert">×</button>'
+ '<h3>Authentication failed</h3>'
                    + "<p>This might be due to a problem in Tornado's GoogleMixin.</p>"
+ '</div>')
self.render_default("index.html", content=content)
return None
# @todo: Validate user data.
# Save user when authentication was successful.
def on_user_find(result, user=user):
#@todo: We should check if email is given even though we can assume.
if result == "null" or not result:
# If user does not exist, create a new entry.
# self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
self.application.client.set("user:" + user["name"], tornado.escape.json_encode(user))
else:
dbuser = tornado.escape.json_decode(result)
                # If the user is trying to register, the username is taken.
if user["password"] != "":
content = ('<h2>Login</h2>'
+ '<p>Username taken!</p>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_registration" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="password" name="password" placeholder="Your Password"> '
+ '<input type="submit" class="btn btn-default" value="Register">'
+ '</form>')
self.render_default("index.html", content=content)
return None
                # If the user is trying to log in, check the password.
if user["pass_login"] != dbuser.get("password"):
content = ('<h2>Login</h2>'
+ '<p>Password incorrect!</p>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="password" name="pass_login" placeholder="Your Password"> '
+ '<input type="submit" class="btn btn-default" value="Login">'
+ '</form>')
self.render_default("index.html", content=content)
return None
# dbuser.update(user)
# user = dbuser
# self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
# self.application.client.set("user:" + user["name"], tornado.escape.json_encode(user))
# Save user id in cookie.
# self.set_secure_cookie("user", user["email"])
self.set_cookie("user", user["name"])
# self.application.usernames[user["email"]] = user.get("name") or user["email"]
self.application.usernames[user["name"]] = user.get("name") # or user["email"]
# Closed client connection
if self.request.connection.stream.closed():
logging.warning("Waiter disappeared")
return
self.redirect("/")
# dbuser = self.application.client.get("user:" + user["email"], on_user_find)
dbuser = self.application.client.get("user:" + user["name"], on_user_find)
class LogoutHandler(BaseHandler):
def get(self):
self.clear_cookie('user')
self.redirect("/")
| mit |
kbrebanov/ansible | contrib/inventory/nsot.py | 117 | 9825 | #!/usr/bin/env python
'''
nsot
====
Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
Features
--------
* Define host groups in form of NSoT device attribute criteria
* All parameters defined by the spec as of 2015-09-05 are supported.
+ ``--list``: Returns JSON hash of host groups -> hosts and top-level
``_meta`` -> ``hostvars`` which correspond to all device attributes.
Group vars can be specified in the YAML configuration, noted below.
+ ``--host <hostname>``: Returns JSON hash where every item is a device
attribute.
* In addition to all attributes assigned to the resource being returned, the
  script will also append ``site_id`` and ``id`` as facts to utilize.
Configuration
-------------
Since it'd be annoying and failure-prone to guess where your configuration
file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
This file should adhere to the YAML spec. Each top-level key must be the
desired Ansible group name, hashed with a single 'query' item to define the
NSoT attribute query.
Queries follow the normal NSoT query syntax, `shown here`_
.. _shown here: https://github.com/dropbox/pynsot#set-queries
.. code:: yaml
routers:
query: 'deviceType=ROUTER'
vars:
a: b
c: d
juniper_fw:
query: 'deviceType=FIREWALL manufacturer=JUNIPER'
not_f10:
query: '-manufacturer=FORCE10'
The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
cli would, so make sure that's configured appropriately.
.. note::
    Attributes I'm showing above are influenced by ones that the Trigger
    project likes. As is the spirit of NSoT, use whichever attributes work
    best for your workflow.
If the config file is blank or absent, the following default groups will be
created:
* ``routers``: deviceType=ROUTER
* ``switches``: deviceType=SWITCH
* ``firewalls``: deviceType=FIREWALL
These are likely not useful for everyone so please use the configuration. :)
.. note::
    By default, resources will only be returned for whatever your default
    site is set to in your ``~/.pynsotrc``.
    If you want to specify, add an extra key under the group for ``site: n``.
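    For example (hypothetical site id), pinning the ``routers`` group to
    site 2:
    .. code:: yaml
        routers:
            query: 'deviceType=ROUTER'
            site: 2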
Output Examples
---------------
Here are some examples shown from just calling the command directly::
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
{
"routers": {
"hosts": [
"test1.example.com"
],
"vars": {
"cool_level": "very",
"group": "routers"
}
},
"firewalls": {
"hosts": [
"test2.example.com"
],
"vars": {
"cool_level": "enough",
"group": "firewalls"
}
},
"_meta": {
"hostvars": {
"test2.example.com": {
"make": "SRX",
"site_id": 1,
"id": 108
},
"test1.example.com": {
"make": "MX80",
"site_id": 1,
"id": 107
}
}
},
"rtr_and_fw": {
"hosts": [
"test1.example.com",
"test2.example.com"
],
"vars": {}
}
}
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
{
"make": "MX80",
"site_id": 1,
"id": 107
}
'''
from __future__ import print_function
import sys
import os
import pkg_resources
import argparse
import json
import yaml
from textwrap import dedent
from pynsot.client import get_api_client
from pynsot.app import HttpServerError
from click.exceptions import UsageError
from six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
class NSoTInventory(object):
'''NSoT Client object for gather inventory'''
def __init__(self):
self.config = dict()
config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
        if config_env:
            config_file = os.path.abspath(config_env)
            try:
                with open(config_file) as f:
                    self.config.update(yaml.safe_load(f))
            except IOError:  # If file non-existent, use default config
                self._config_default()
            except TypeError:  # If empty file, use default config
                warning('Empty config file')
                self._config_default()
            except Exception as e:
                sys.exit('%s\n' % e)
else: # Use defaults if env var missing
self._config_default()
self.groups = self.config.keys()
self.client = get_api_client()
self._meta = {'hostvars': dict()}
def _config_default(self):
default_yaml = '''
---
routers:
query: deviceType=ROUTER
switches:
query: deviceType=SWITCH
firewalls:
query: deviceType=FIREWALL
'''
self.config = yaml.safe_load(dedent(default_yaml))
def do_list(self):
'''Direct callback for when ``--list`` is provided
Relies on the configuration generated from init to run
_inventory_group()
'''
inventory = dict()
for group, contents in self.config.items():
group_response = self._inventory_group(group, contents)
inventory.update(group_response)
inventory.update({'_meta': self._meta})
return json.dumps(inventory)
def do_host(self, host):
return json.dumps(self._hostvars(host))
def _hostvars(self, host):
'''Return dictionary of all device attributes
Depending on number of devices in NSoT, could be rather slow since this
has to request every device resource to filter through
'''
device = [i for i in self.client.devices.get()
if host in i['hostname']][0]
attributes = device['attributes']
attributes.update({'site_id': device['site_id'], 'id': device['id']})
return attributes
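    # Illustrative return shape (values taken from the module docstring
    # examples, not live data): _hostvars('test1') would yield something like
    # {'make': 'MX80', 'site_id': 1, 'id': 107}.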
def _inventory_group(self, group, contents):
'''Takes a group and returns inventory for it as dict
:param group: Group name
:type group: str
:param contents: The contents of the group's YAML config
:type contents: dict
contents param should look like::
{
'query': 'xx',
'vars':
'a': 'b'
}
Will return something like::
{ group: {
hosts: [],
vars: {},
}
'''
query = contents.get('query')
hostvars = contents.get('vars', dict())
site = contents.get('site', dict())
obj = {group: dict()}
obj[group]['hosts'] = []
obj[group]['vars'] = hostvars
try:
assert isinstance(query, string_types)
        except AssertionError:
sys.exit('ERR: Group queries must be a single string\n'
' Group: %s\n'
' Query: %s\n' % (group, query)
)
try:
if site:
site = self.client.sites(site)
devices = site.devices.query.get(query=query)
else:
devices = self.client.devices.query.get(query=query)
except HttpServerError as e:
if '500' in str(e.response):
_site = 'Correct site id?'
_attr = 'Queried attributes actually exist?'
questions = _site + '\n' + _attr
sys.exit('ERR: 500 from server.\n%s' % questions)
else:
raise
except UsageError:
sys.exit('ERR: Could not connect to server. Running?')
# Would do a list comprehension here, but would like to save code/time
# and also acquire attributes in this step
for host in devices:
# Iterate through each device that matches query, assign hostname
# to the group's hosts array and then use this single iteration as
# a chance to update self._meta which will be used in the final
# return
hostname = host['hostname']
obj[group]['hosts'].append(hostname)
attributes = host['attributes']
attributes.update({'site_id': host['site_id'], 'id': host['id']})
self._meta['hostvars'].update({hostname: attributes})
return obj
def parse_args():
desc = __doc__.splitlines()[4] # Just to avoid being redundant
# Establish parser with options and error out if no action provided
parser = argparse.ArgumentParser(
description=desc,
conflict_handler='resolve',
)
# Arguments
#
# Currently accepting (--list | -l) and (--host | -h)
# These must not be allowed together
parser.add_argument(
'--list', '-l',
help='Print JSON object containing hosts to STDOUT',
action='store_true',
dest='list_', # Avoiding syntax highlighting for list
)
parser.add_argument(
'--host', '-h',
help='Print JSON object containing hostvars for <host>',
action='store',
)
args = parser.parse_args()
if not args.list_ and not args.host: # Require at least one option
parser.exit(status=1, message='No action requested')
if args.list_ and args.host: # Do not allow multiple options
parser.exit(status=1, message='Too many actions requested')
return args
def main():
'''Set up argument handling and callback routing'''
args = parse_args()
client = NSoTInventory()
# Callback condition
if args.list_:
print(client.do_list())
elif args.host:
print(client.do_host(args.host))
if __name__ == '__main__':
main()
| gpl-3.0 |
Andrew-McNab-UK/DIRAC | docs/source/conf.py | 4 | 9724 | # -*- coding: utf-8 -*-
#
# DiracDocs documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 25 17:34:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
import subprocess
sys.path.insert(0, ".")
try:
import fakeEnvironment
except ImportError:
pass
try:
import fakeEnv
except ImportError:
pass
diracRelease = os.environ.get( 'DIRACVERSION', 'integration' )
if os.environ.get('READTHEDOCS') == 'True':
diracRelease = os.path.basename( os.path.abspath( "../../" ) )
if diracRelease.startswith("rel-"):
diracRelease = diracRelease[4:]
print 'conf.py: %s as DIRACVERSION' % diracRelease
#...............................................................................
# configuration
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
if os.environ.get('READTHEDOCS') == 'True':
sys.path.append(os.path.abspath('.'))
diracPath = os.path.abspath( os.path.join( os.getcwd(), "../..") )
print "DiracPath",diracPath
buildfolder ="_build"
try:
  os.mkdir( os.path.abspath( "../" + buildfolder ) )
except OSError:
  # The build folder may already exist.
  pass
## We need to have the DIRAC module somewhere, or we cannot import it, as ReadTheDocs clones the repo into a directory based on the branch name
if not os.path.exists( "../../DIRAC" ):
diracLink = os.path.abspath( os.path.join( os.getcwd() , "../" , buildfolder, "DIRAC" ) )
print "DiracLink",diracLink
if not os.path.exists( diracLink ):
RES = subprocess.check_output( ["ln","-s", diracPath, diracLink ] )
diracPath = os.path.abspath( os.path.join( diracLink, ".." ) )
sys.path.insert(0, diracPath)
for path in sys.path:
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '')+":"+path
## this is not working at the moment because the DIRAC folder is not found by the buildScriptsDOC script
# print "Pythonpath",os.environ['PYTHONPATH']
# buildCommand = os.path.join( os.getcwd() , "../Tools/buildScriptsDOC.py" )
# scriptdir = os.path.abspath(os.path.join( os.getcwd() , "../", buildfolder, "scripts" ))
# try:
# os.mkdir( scriptdir )
# except:
# pass
# print "command", buildCommand
# code = subprocess.Popen( ["python", buildCommand, scriptdir ], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout , err = code.communicate()
# print "script",stdout
# print "script",err
os.environ["DIRAC"] = diracPath
print "DIRAC ENVIRON", os.environ["DIRAC"]
## The singlehtml build needs too much memory, so we need to create less code documentation
buildtype = "limited" if any("singlehtml" in arg for arg in sys.argv ) else "full"
print "Choosing build type:", buildtype
buildCommand = os.path.join( os.getcwd() , "../Tools/MakeDoc.py" )
code = subprocess.Popen( ["python",buildCommand, buildtype], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout , err = code.communicate()
print "code",stdout
print "code",err
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DIRAC'
copyright = u'%s, DIRAC Project' % datetime.datetime.utcnow().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = diracRelease
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%H:%M %d/%m/%Y %Z'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#ADRI: Ignore old stuff that is not included in the compilation
exclude_trees = [ 'AdministratorGuide/Configuration/ConfigurationReference' ]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
html_style = 'dirac.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
# 'sidebarbgcolor':'#D5E2F2'
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "DIRAC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/DIRAC-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d/%m/%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DiracDocsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DiracDocs.tex', u'DIRAC Documentation',
u'DIRAC Project.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
## link with the python standard library docs
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7', None),
}
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
ganeti/ganeti | lib/hooksmaster.py | 1 | 10785 | #
#
# Copyright (C) 2006, 2007, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the logic for running hooks.
"""
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import pathutils
def _RpcResultsToHooksResults(rpc_results):
"""Function to convert RPC results to the format expected by HooksMaster.
@type rpc_results: dict(node: L{rpc.RpcResult})
@param rpc_results: RPC results
@rtype: dict(node: (fail_msg, offline, hooks_results))
@return: RPC results unpacked according to the format expected by
    L{hooksmaster.HooksMaster}
"""
return dict((node, (rpc_res.fail_msg, rpc_res.offline, rpc_res.payload))
for (node, rpc_res) in rpc_results.items())
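# Illustrative sketch (hypothetical node name and payload): a successful
# rpc.RpcResult with fail_msg == "", offline == False and payload
# [("00-hook", "SUCCESS", "")] for node "node1" is unpacked above into
# {"node1": ("", False, [("00-hook", "SUCCESS", "")])}.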
class HooksMaster(object):
def __init__(self, opcode, hooks_path, nodes, hooks_execution_fn,
hooks_results_adapt_fn, build_env_fn, prepare_post_nodes_fn,
log_fn, htype=None, cluster_name=None, master_name=None):
"""Base class for hooks masters.
This class invokes the execution of hooks according to the behaviour
specified by its parameters.
@type opcode: string
@param opcode: opcode of the operation to which the hooks are tied
@type hooks_path: string
@param hooks_path: prefix of the hooks directories
@type nodes: 2-tuple of lists
@param nodes: 2-tuple of lists containing nodes on which pre-hooks must be
run and nodes on which post-hooks must be run
@type hooks_execution_fn: function that accepts the following parameters:
(node_list, hooks_path, phase, environment)
    @param hooks_execution_fn: function that will execute the hooks
    @type hooks_results_adapt_fn: function
    @param hooks_results_adapt_fn: function that will adapt the return value of
      hooks_execution_fn to the format expected by RunPhase; can be None,
      indicating that no conversion is necessary
@type build_env_fn: function that returns a dictionary having strings as
keys
@param build_env_fn: function that builds the environment for the hooks
@type prepare_post_nodes_fn: function that take a list of node UUIDs and
returns a list of node UUIDs
@param prepare_post_nodes_fn: function that is invoked right before
executing post hooks and can change the list of node UUIDs to run the post
hooks on
@type log_fn: function that accepts a string
@param log_fn: logging function
@type htype: string or None
@param htype: None or one of L{constants.HTYPE_CLUSTER},
L{constants.HTYPE_NODE}, L{constants.HTYPE_INSTANCE}
@type cluster_name: string
@param cluster_name: name of the cluster
@type master_name: string
@param master_name: name of the master
"""
self.opcode = opcode
self.hooks_path = hooks_path
self.hooks_execution_fn = hooks_execution_fn
self.hooks_results_adapt_fn = hooks_results_adapt_fn
self.build_env_fn = build_env_fn
self.prepare_post_nodes_fn = prepare_post_nodes_fn
self.log_fn = log_fn
self.htype = htype
self.cluster_name = cluster_name
self.master_name = master_name
self.pre_env = self._BuildEnv(constants.HOOKS_PHASE_PRE)
(self.pre_nodes, self.post_nodes) = nodes
def _BuildEnv(self, phase):
"""Compute the environment and the target nodes.
Based on the opcode and the current node list, this builds the
environment for the hooks and the target node list for the run.
"""
if phase == constants.HOOKS_PHASE_PRE:
prefix = "GANETI_"
elif phase == constants.HOOKS_PHASE_POST:
prefix = "GANETI_POST_"
else:
raise AssertionError("Unknown phase '%s'" % phase)
env = {}
if self.hooks_path is not None:
phase_env = self.build_env_fn()
if phase_env:
assert not compat.any(key.upper().startswith(prefix)
for key in phase_env)
env.update(("%s%s" % (prefix, key), value)
for (key, value) in phase_env.items())
if phase == constants.HOOKS_PHASE_PRE:
assert compat.all((key.startswith("GANETI_") and
not key.startswith("GANETI_POST_"))
for key in env)
elif phase == constants.HOOKS_PHASE_POST:
assert compat.all(key.startswith("GANETI_POST_") for key in env)
assert isinstance(self.pre_env, dict)
# Merge with pre-phase environment
assert not compat.any(key.startswith("GANETI_POST_")
for key in self.pre_env)
env.update(self.pre_env)
else:
raise AssertionError("Unknown phase '%s'" % phase)
return env
def _RunWrapper(self, node_list, hpath, phase, phase_env):
"""Simple wrapper over self.callfn.
This method fixes the environment before executing the hooks.
"""
env = {
"PATH": constants.HOOKS_PATH,
"GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
"GANETI_OP_CODE": self.opcode,
"GANETI_DATA_DIR": pathutils.DATA_DIR,
"GANETI_HOOKS_PHASE": phase,
"GANETI_HOOKS_PATH": hpath,
}
if self.htype:
env["GANETI_OBJECT_TYPE"] = self.htype
if self.cluster_name is not None:
env["GANETI_CLUSTER"] = self.cluster_name
if self.master_name is not None:
env["GANETI_MASTER"] = self.master_name
if phase_env:
env = utils.algo.JoinDisjointDicts(env, phase_env)
# Convert everything to strings
env = dict([(str(key), str(val)) for key, val in env.items()])
assert compat.all(key == "PATH" or key.startswith("GANETI_")
for key in env)
return self.hooks_execution_fn(node_list, hpath, phase, env)
def RunPhase(self, phase, node_names=None):
"""Run all the scripts for a phase.
This is the main function of the HookMaster.
    It executes self.hooks_execution_fn, and after running
    self.hooks_results_adapt_fn on its results it expects them to be in the
    form {node_name: (fail_msg, offline, [(script, result, output), ...])}.
@param phase: one of L{constants.HOOKS_PHASE_POST} or
L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
@param node_names: overrides the predefined list of nodes for the given
phase
@return: the processed results of the hooks multi-node rpc call
@raise errors.HooksFailure: on communication failure to the nodes
@raise errors.HooksAbort: on failure of one of the hooks
"""
if phase == constants.HOOKS_PHASE_PRE:
if node_names is None:
node_names = self.pre_nodes
env = self.pre_env
elif phase == constants.HOOKS_PHASE_POST:
if node_names is None:
node_names = self.post_nodes
if node_names is not None and self.prepare_post_nodes_fn is not None:
node_names = frozenset(self.prepare_post_nodes_fn(list(node_names)))
env = self._BuildEnv(phase)
else:
raise AssertionError("Unknown phase '%s'" % phase)
if not node_names:
# empty node list, we should not attempt to run this as either
# we're in the cluster init phase and the rpc client part can't
# even attempt to run, or this LU doesn't do hooks at all
return
results = self._RunWrapper(node_names, self.hooks_path, phase, env)
if not results:
msg = "Communication Failure"
if phase == constants.HOOKS_PHASE_PRE:
raise errors.HooksFailure(msg)
else:
self.log_fn(msg)
return results
converted_res = results
if self.hooks_results_adapt_fn:
converted_res = self.hooks_results_adapt_fn(results)
errs = []
for node_name, (fail_msg, offline, hooks_results) in converted_res.items():
if offline:
continue
if fail_msg:
self.log_fn("Communication failure to node %s: %s", node_name, fail_msg)
continue
for script, hkr, output in hooks_results:
if hkr == constants.HKR_FAIL:
if phase == constants.HOOKS_PHASE_PRE:
errs.append((node_name, script, output))
else:
if not output:
output = "(no output)"
self.log_fn("On %s script %s failed, output: %s" %
(node_name, script, output))
if errs and phase == constants.HOOKS_PHASE_PRE:
raise errors.HooksAbort(errs)
return results
def RunConfigUpdate(self):
"""Run the special configuration update hook
This is a special hook that runs only on the master after each
top-level LI if the configuration has been updated.
"""
phase = constants.HOOKS_PHASE_POST
hpath = constants.HOOKS_NAME_CFGUPDATE
nodes = [self.master_name]
self._RunWrapper(nodes, hpath, phase, self.pre_env)
@staticmethod
def BuildFromLu(hooks_execution_fn, lu):
if lu.HPATH is None:
nodes = (None, None)
else:
hooks_nodes = lu.BuildHooksNodes()
if len(hooks_nodes) != 2:
raise errors.ProgrammerError(
"LogicalUnit.BuildHooksNodes must return a 2-tuple")
nodes = (frozenset(hooks_nodes[0]), frozenset(hooks_nodes[1]))
master_name = cluster_name = None
if lu.cfg:
master_name = lu.cfg.GetMasterNodeName()
cluster_name = lu.cfg.GetClusterName()
return HooksMaster(lu.op.OP_ID, lu.HPATH, nodes, hooks_execution_fn,
_RpcResultsToHooksResults, lu.BuildHooksEnv,
lu.PreparePostHookNodes, lu.LogWarning, lu.HTYPE,
cluster_name, master_name)
| bsd-2-clause |
ChanChiChoi/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
rupak0577/ginga | ginga/web/pgw/Plot.py | 3 | 4306 | #
# Plot.py -- Plotting widget canvas wrapper.
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from ginga.web.pgw import Widgets
class PlotWidget(Widgets.Canvas):
"""
This class implements the server-side backend of the surface for a
web-based plot viewer. It uses a web socket to connect to an HTML5
canvas with javascript callbacks in a web browser on the client.
The viewer is created separately on the backend and connects to this
surface via the set_viewer() method.
"""
def __init__(self, plot, width=500, height=500):
super(PlotWidget, self).__init__(width=width, height=height)
self.widget = FigureCanvas(plot.get_figure())
self.logger = plot.logger
self._configured = False
self.refresh_delay = 0.010
self.set_plot(plot)
def set_plot(self, plot):
self.logger.debug("set_plot called")
self.plot = plot
self._dispatch_event_table = {
"activate": self.ignore_event,
"setbounds": self.map_event_cb,
"mousedown": self.ignore_event,
"mouseup": self.ignore_event,
"mousemove": self.ignore_event,
"mouseout": self.ignore_event,
"mouseover": self.ignore_event,
"mousewheel": self.ignore_event,
"wheel": self.ignore_event,
"click": self.ignore_event,
"dblclick": self.ignore_event,
"keydown": self.ignore_event,
"keyup": self.ignore_event,
"keypress": self.ignore_event,
"resize": self.resize_event,
"focus": self.ignore_event,
"focusout": self.ignore_event,
"blur": self.ignore_event,
"drop": self.ignore_event,
"paste": self.ignore_event,
# Hammer.js events
"pinch": self.ignore_event,
"pinchstart": self.ignore_event,
"pinchend": self.ignore_event,
"rotate": self.ignore_event,
"rotatestart": self.ignore_event,
"rotateend": self.ignore_event,
"tap": self.ignore_event,
"pan": self.ignore_event,
"panstart": self.ignore_event,
"panend": self.ignore_event,
"swipe": self.ignore_event,
}
self.plot.add_callback('draw-canvas', self.draw_cb)
self.add_timer('refresh', self.refresh_cb)
def get_plot(self):
return self.plot
def ignore_event(self, event):
pass
def refresh_cb(self):
app = self.get_app()
app.do_operation('refresh_canvas', id=self.id)
self.reset_timer('refresh', self.refresh_delay)
def get_rgb_buffer(self, plot):
buf = BytesIO()
fig = plot.get_figure()
fig.canvas.print_figure(buf, format='png')
wd, ht = self.width, self.height
return (wd, ht, buf.getvalue())
def draw_cb(self, plot):
self.logger.debug("getting RGB buffer")
wd, ht, buf = self.get_rgb_buffer(plot)
#self.logger.debug("clear_rect")
#self.clear_rect(0, 0, wd, ht)
self.logger.debug("drawing %dx%d image" % (wd, ht))
self.draw_image(buf, 0, 0, wd, ht)
self.reset_timer('refresh', self.refresh_delay)
def configure_window(self, wd, ht):
self.logger.debug("canvas resized to %dx%d" % (wd, ht))
fig = self.plot.get_figure()
fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)
def map_event_cb(self, event):
wd, ht = event.width, event.height
self.configure_window(wd, ht)
self.plot.draw()
def resize_event(self, event):
wd, ht = event.x, event.y
self.configure_window(wd, ht)
self.plot.draw()
def _cb_redirect(self, event):
method = self._dispatch_event_table[event.type]
try:
method(event)
except Exception as e:
self.logger.error("error redirecting '%s' event: %s" % (
event.type, str(e)))
# TODO: dump traceback to debug log
#END
| bsd-3-clause |
arifsetiawan/edx-platform | lms/djangoapps/courseware/tests/test_navigation.py | 28 | 11973 | """
This test file will run through some LMS test scenarios regarding access and navigation of the LMS
"""
import time
from mock import patch
from nose.plugins.attrib import attr
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.factories import GlobalStaffFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.django import modulestore
@attr('shard_1')
class TestNavigation(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that navigation state is saved properly.
"""
STUDENT_INFO = [('[email protected]', 'foo'), ('[email protected]', 'foo')]
def setUp(self):
super(TestNavigation, self).setUp()
self.test_course = CourseFactory.create()
self.course = CourseFactory.create()
self.chapter0 = ItemFactory.create(parent=self.course,
display_name='Overview')
self.chapter9 = ItemFactory.create(parent=self.course,
display_name='factory_chapter')
self.section0 = ItemFactory.create(parent=self.chapter0,
display_name='Welcome')
self.section9 = ItemFactory.create(parent=self.chapter9,
display_name='factory_section')
self.unit0 = ItemFactory.create(parent=self.section0,
display_name='New Unit')
self.chapterchrome = ItemFactory.create(parent=self.course,
display_name='Chrome')
self.chromelesssection = ItemFactory.create(parent=self.chapterchrome,
display_name='chromeless',
chrome='none')
self.accordionsection = ItemFactory.create(parent=self.chapterchrome,
display_name='accordion',
chrome='accordion')
self.tabssection = ItemFactory.create(parent=self.chapterchrome,
display_name='tabs',
chrome='tabs')
self.defaultchromesection = ItemFactory.create(
parent=self.chapterchrome,
display_name='defaultchrome',
)
self.fullchromesection = ItemFactory.create(parent=self.chapterchrome,
display_name='fullchrome',
chrome='accordion,tabs')
self.tabtest = ItemFactory.create(parent=self.chapterchrome,
display_name='progress_tab',
default_tab='progress')
# Create student accounts and activate them.
for i in range(len(self.STUDENT_INFO)):
email, password = self.STUDENT_INFO[i]
username = 'u{0}'.format(i)
self.create_account(username, email, password)
self.activate_user(email)
self.staff_user = GlobalStaffFactory()
def assertTabActive(self, tabname, response):
        ''' Check that the given tab is active in the tab set '''
for line in response.content.split('\n'):
if tabname in line and 'active' in line:
return
raise AssertionError("assertTabActive failed: {} not active".format(tabname))
def assertTabInactive(self, tabname, response):
        ''' Check that the given tab is not active in the tab set '''
for line in response.content.split('\n'):
if tabname in line and 'active' in line:
raise AssertionError("assertTabInactive failed: " + tabname + " active")
return
def test_chrome_settings(self):
'''
Test settings for disabling and modifying navigation chrome in the courseware:
- Accordion enabled, or disabled
- Navigation tabs enabled, disabled, or redirected
'''
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.course, True)
test_data = (
('tabs', False, True),
('none', False, False),
('fullchrome', True, True),
('accordion', True, False),
('fullchrome', True, True)
)
for (displayname, accordion, tabs) in test_data:
response = self.client.get(reverse('courseware_section', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Chrome',
'section': displayname,
}))
self.assertEquals('open_close_accordion' in response.content, accordion)
self.assertEquals('course-tabs' in response.content, tabs)
self.assertTabInactive('progress', response)
self.assertTabActive('courseware', response)
response = self.client.get(reverse('courseware_section', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Chrome',
'section': 'progress_tab',
}))
self.assertTabActive('progress', response)
self.assertTabInactive('courseware', response)
@override_settings(SESSION_INACTIVITY_TIMEOUT_IN_SECONDS=1)
def test_inactive_session_timeout(self):
"""
Verify that an inactive session times out and redirects to the
login page
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
# make sure we can access courseware immediately
resp = self.client.get(reverse('dashboard'))
self.assertEquals(resp.status_code, 200)
# then wait a bit and see if we get timed out
time.sleep(2)
resp = self.client.get(reverse('dashboard'))
# re-request, and we should get a redirect to login page
self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=' + reverse('dashboard'))
def test_redirects_first_time(self):
"""
Verify that the first time we click on the courseware tab we are
redirected to the 'Welcome' section.
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.course, True)
self.enroll(self.test_course, True)
resp = self.client.get(reverse('courseware',
kwargs={'course_id': self.course.id.to_deprecated_string()}))
self.assertRedirects(resp, reverse(
'courseware_section', kwargs={'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Overview',
'section': 'Welcome'}))
def test_redirects_second_time(self):
"""
Verify the accordion remembers we've already visited the Welcome section
        and redirects correspondingly.
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.course, True)
self.enroll(self.test_course, True)
self.client.get(reverse('courseware_section', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Overview',
'section': 'Welcome',
}))
resp = self.client.get(reverse('courseware',
kwargs={'course_id': self.course.id.to_deprecated_string()}))
redirect_url = reverse(
'courseware_chapter',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'Overview'
}
)
self.assertRedirects(resp, redirect_url)
def test_accordion_state(self):
"""
Verify the accordion remembers which chapter you were last viewing.
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.course, True)
self.enroll(self.test_course, True)
# Now we directly navigate to a section in a chapter other than 'Overview'.
url = reverse(
'courseware_section',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'factory_chapter',
'section': 'factory_section'
}
)
self.assert_request_status_code(200, url)
# And now hitting the courseware tab should redirect to 'factory_chapter'
url = reverse(
'courseware',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
resp = self.client.get(url)
redirect_url = reverse(
'courseware_chapter',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'chapter': 'factory_chapter',
}
)
self.assertRedirects(resp, redirect_url)
def test_incomplete_course(self):
email = self.staff_user.email
password = "test"
self.login(email, password)
self.enroll(self.test_course, True)
test_course_id = self.test_course.id.to_deprecated_string()
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
self.assert_request_status_code(200, url)
section = ItemFactory.create(
parent_location=self.test_course.location,
display_name='New Section'
)
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
self.assert_request_status_code(200, url)
subsection = ItemFactory.create(
parent_location=section.location,
display_name='New Subsection'
)
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
self.assert_request_status_code(200, url)
ItemFactory.create(
parent_location=subsection.location,
display_name='New Unit'
)
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
self.assert_request_status_code(302, url)
def test_proctoring_js_includes(self):
"""
Make sure that proctoring JS does not get included on
courseware pages if either the FEATURE flag is turned off
or the course is not proctored enabled
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.test_course, True)
test_course_id = self.test_course.id.to_deprecated_string()
with patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': False}):
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
resp = self.client.get(url)
self.assertNotContains(resp, '/static/js/lms-proctoring.js')
with patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': True}):
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
resp = self.client.get(url)
self.assertNotContains(resp, '/static/js/lms-proctoring.js')
# now set up a course which is proctored enabled
self.test_course.enable_proctored_exams = True
self.test_course.save()
modulestore().update_item(self.test_course, self.user.id)
resp = self.client.get(url)
self.assertContains(resp, '/static/js/lms-proctoring.js')
| agpl-3.0 |
pangweishen/rt-thread | tools/keil.py | 20 | 13161 | #
# File : keil.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def _get_filetype(fn):
if fn.rfind('.cpp') != -1 or fn.rfind('.cxx') != -1:
return 8
if fn.rfind('.c') != -1 or fn.rfind('.C') != -1:
return 1
# assembly file type
if fn.rfind('.s') != -1 or fn.rfind('.S') != -1:
return 2
# header type
if fn.rfind('.h') != -1:
return 5
if fn.rfind('.lib') != -1:
return 4
# other filetype
return 5
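# A quick reference sketch of the mapping above (values are Keil uVision
# file-type codes; the file names are hypothetical):
#   _get_filetype('app.cpp')  -> 8  (C++ source)
#   _get_filetype('main.c')   -> 1  (C source)
#   _get_filetype('irq.s')    -> 2  (assembly)
#   _get_filetype('board.h')  -> 5  (header)
#   _get_filetype('m4.lib')   -> 4  (library)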
def MDK4AddGroupForFN(ProjectFiles, parent, name, filename, project_path):
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
obj_name = name.replace('.S', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
def MDK4AddLibToGroup(ProjectFiles, group, name, filename, project_path):
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
obj_name = name.replace('.S', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
def MDK4AddGroup(ProjectFiles, parent, name, files, project_path):
# don't add an empty group
if len(files) == 0:
return
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
    obj_name = name.replace('.S', '.o')
else:
    obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
return group
# The common part of making MDK4/5 project
def MDK45Project(tree, target, script):
project_path = os.path.dirname(os.path.abspath(target))
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
ProjectFiles = []
# add group
groups = tree.find('Targets/Target/Groups')
if groups is None:
groups = SubElement(tree.find('Targets/Target'), 'Groups')
groups.clear() # clean old groups
for group in script:
group_tree = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path)
# for local CPPPATH/CPPDEFINES
if (group_tree != None) and (group.has_key('LOCAL_CPPPATH') or group.has_key('LOCAL_CCFLAGS')):
GroupOption = SubElement(group_tree, 'GroupOption')
GroupArmAds = SubElement(GroupOption, 'GroupArmAds')
Cads = SubElement(GroupArmAds, 'Cads')
VariousControls = SubElement(Cads, 'VariousControls')
MiscControls = SubElement(VariousControls, 'MiscControls')
if group.has_key('LOCAL_CCFLAGS'):
MiscControls.text = group['LOCAL_CCFLAGS']
else:
MiscControls.text = ' '
Define = SubElement(VariousControls, 'Define')
if group.has_key('LOCAL_CPPDEFINES'):
Define.text = ', '.join(set(group['LOCAL_CPPDEFINES']))
else:
Define.text = ' '
Undefine = SubElement(VariousControls, 'Undefine')
Undefine.text = ' '
IncludePath = SubElement(VariousControls, 'IncludePath')
if group.has_key('LOCAL_CPPPATH'):
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in group['LOCAL_CPPPATH']])
else:
IncludePath.text = ' '
# get each include path
if group.has_key('CPPPATH') and group['CPPPATH']:
    CPPPATH += group['CPPPATH']
# get each group's definitions
if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
    CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
if group.has_key('LIBS') and group['LIBS']:
for item in group['LIBS']:
lib_path = ''
for path_item in group['LIBPATH']:
full_path = os.path.join(path_item, item + '.lib')
if os.path.isfile(full_path): # has this library
lib_path = full_path
if lib_path != '':
if (group_tree != None):
MDK4AddLibToGroup(ProjectFiles, group_tree, group['name'], lib_path, project_path)
else:
MDK4AddGroupForFN(ProjectFiles, groups, group['name'], lib_path, project_path)
# write include path, definitions and link flags
IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath')
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in CPPPATH])
Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define')
Define.text = ', '.join(set(CPPDEFINES))
Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc')
Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
def MDK4Project(target, script):
template_tree = etree.parse('template.uvproj')
MDK45Project(template_tree, target, script)
# remove project.uvopt file
project_uvopt = os.path.abspath(target).replace('uvproj', 'uvopt')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
# copy uvopt file
if os.path.exists('template.uvopt'):
import shutil
shutil.copy2('template.uvopt', 'project.uvopt')
def MDK5Project(target, script):
template_tree = etree.parse('template.uvprojx')
MDK45Project(template_tree, target, script)
# remove project.uvopt file
project_uvopt = os.path.abspath(target).replace('uvprojx', 'uvoptx')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
# copy uvopt file
if os.path.exists('template.uvoptx'):
import shutil
shutil.copy2('template.uvoptx', 'project.uvoptx')
def MDKProject(target, script):
template = file('template.Uv2', "rb")
lines = template.readlines()
project = file(target, "wb")
project_path = os.path.dirname(os.path.abspath(target))
line_index = 5
# write group
for group in script:
lines.insert(line_index, 'Group (%s)\r\n' % group['name'])
line_index += 1
lines.insert(line_index, '\r\n')
line_index += 1
# write file
ProjectFiles = []
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
# number of groups
group_index = 1
for group in script:
# print group['name']
# get each include path
if group.has_key('CPPPATH') and group['CPPPATH']:
    CPPPATH += group['CPPPATH']
# get each group's definitions
if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
    # CPPDEFINES is a list, so extend it directly; prepending ';' here
    # would wrongly splice separator characters into the list
    CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n'
% (group_index, _get_filetype(name), path, name))
line_index += 1
group_index = group_index + 1
lines.insert(line_index, '\r\n')
line_index += 1
# remove repeat path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
CPPPATH = string.join(paths, ';')
definitions = [i for i in set(CPPDEFINES)]
CPPDEFINES = string.join(definitions, ', ')
while line_index < len(lines):
if lines[line_index].startswith(' ADSCINCD '):
lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n'
if lines[line_index].startswith(' ADSLDMC ('):
lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n'
if lines[line_index].startswith(' ADSCDEFN ('):
lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n'
line_index += 1
# write project
for line in lines:
project.write(line)
project.close()
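# Usage sketch (assumes an SCons build where 'script' is a list of group
# dicts carrying 'name', 'src' and optional CPPPATH/CPPDEFINES/LINKFLAGS/LIBS,
# and the matching template file sits in the working directory):
# MDKProject('project.Uv2', script)       # uVision2, needs template.Uv2
# MDK4Project('project.uvproj', script)   # MDK4, needs template.uvproj
# MDK5Project('project.uvprojx', script)  # MDK5, needs template.uvprojx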
| gpl-2.0 |
Scarygami/gae-gcs-push2deploy-secrets | lib/werkzeug/testsuite/urls.py | 83 | 14595 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.urls
~~~~~~~~~~~~~~~~~~~~~~~
URL helper tests.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.datastructures import OrderedMultiDict
from werkzeug import urls
from werkzeug._compat import text_type, NativeStringIO, BytesIO
class URLsTestCase(WerkzeugTestCase):
def test_replace(self):
url = urls.url_parse('http://de.wikipedia.org/wiki/Troll')
self.assert_strict_equal(url.replace(query='foo=bar'),
urls.url_parse('http://de.wikipedia.org/wiki/Troll?foo=bar'))
self.assert_strict_equal(url.replace(scheme='https'),
urls.url_parse('https://de.wikipedia.org/wiki/Troll'))
def test_quoting(self):
self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
self.assert_strict_equal(urls.url_quote_plus(42), '42')
self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')
def test_bytes_unquoting(self):
self.assert_strict_equal(urls.url_unquote(urls.url_quote(
u'#%="\xf6', charset='latin1'), charset=None), b'#%="\xf6')
def test_url_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'foo=42;bar=23;uni=H%C3%A4nsel', separator=b';')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'%C3%9Ch=H%C3%A4nsel', decode_keys=True)
self.assert_strict_equal(x[u'Üh'], u'Hänsel')
def test_url_bytes_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel', charset=None)
self.assert_strict_equal(x[b'foo'], b'42')
self.assert_strict_equal(x[b'bar'], b'23')
self.assert_strict_equal(x[b'uni'], u'Hänsel'.encode('utf-8'))
def test_streamed_url_decoding(self):
item1 = u'a' * 100000
item2 = u'b' * 400
string = ('a=%s&b=%s&c=%s' % (item1, item2, item2)).encode('ascii')
gen = urls.url_decode_stream(BytesIO(string), limit=len(string),
return_iterator=True)
self.assert_strict_equal(next(gen), ('a', item1))
self.assert_strict_equal(next(gen), ('b', item2))
self.assert_strict_equal(next(gen), ('c', item2))
self.assert_raises(StopIteration, lambda: next(gen))
def test_stream_decoding_string_fails(self):
self.assert_raises(TypeError, urls.url_decode_stream, 'testing')
def test_url_encoding(self):
self.assert_strict_equal(urls.url_encode({'foo': 'bar 45'}), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
self.assert_strict_equal(urls.url_encode(d, sort=True), 'bar=23&blah=H%C3%A4nsel&foo=1')
self.assert_strict_equal(urls.url_encode(d, sort=True, separator=u';'), 'bar=23;blah=H%C3%A4nsel;foo=1')
def test_sorted_url_encode(self):
self.assert_strict_equal(urls.url_encode({u"a": 42, u"b": 23, 1: 1, 2: 2},
sort=True, key=lambda i: text_type(i[0])), '1=1&2=2&a=42&b=23')
self.assert_strict_equal(urls.url_encode({u'A': 1, u'a': 2, u'B': 3, 'b': 4}, sort=True,
key=lambda x: x[0].lower() + x[0]), 'A=1&a=2&B=3&b=4')
def test_streamed_url_encoding(self):
out = NativeStringIO()
urls.url_encode_stream({'foo': 'bar 45'}, out)
self.assert_strict_equal(out.getvalue(), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True)
self.assert_strict_equal(out.getvalue(), 'bar=23&blah=H%C3%A4nsel&foo=1')
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True, separator=u';')
self.assert_strict_equal(out.getvalue(), 'bar=23;blah=H%C3%A4nsel;foo=1')
gen = urls.url_encode_stream(d, sort=True)
self.assert_strict_equal(next(gen), 'bar=23')
self.assert_strict_equal(next(gen), 'blah=H%C3%A4nsel')
self.assert_strict_equal(next(gen), 'foo=1')
self.assert_raises(StopIteration, lambda: next(gen))
def test_url_fixing(self):
x = urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
self.assert_line_equal(x, 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
x = urls.url_fix("http://just.a.test/$-_.+!*'(),")
self.assert_equal(x, "http://just.a.test/$-_.+!*'(),")
def test_url_fixing_qs(self):
x = urls.url_fix(b'http://example.com/?foo=%2f%2f')
self.assert_line_equal(x, 'http://example.com/?foo=%2f%2f')
x = urls.url_fix('http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
self.assert_equal(x, 'http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
def test_iri_support(self):
self.assert_strict_equal(urls.uri_to_iri('http://xn--n3h.net/'),
u'http://\u2603.net/')
self.assert_strict_equal(
urls.uri_to_iri(b'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th'),
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
self.assert_strict_equal(urls.iri_to_uri(u'http://☃.net/'), 'http://xn--n3h.net/')
self.assert_strict_equal(
urls.iri_to_uri(u'http://üser:pässword@☃.net/påth'),
'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th')
self.assert_strict_equal(urls.uri_to_iri('http://test.com/%3Fmeh?foo=%26%2F'),
u'http://test.com/%3Fmeh?foo=%26%2F')
# this should work as well, might break on 2.4 because of a broken
# idna codec
self.assert_strict_equal(urls.uri_to_iri(b'/foo'), u'/foo')
self.assert_strict_equal(urls.iri_to_uri(u'/foo'), '/foo')
self.assert_strict_equal(urls.iri_to_uri(u'http://föö.com:8080/bam/baz'),
'http://xn--f-1gaa.com:8080/bam/baz')
def test_iri_safe_quoting(self):
uri = b'http://xn--f-1gaa.com/%2F%25?q=%C3%B6&x=%3D%25#%25'
iri = u'http://föö.com/%2F%25?q=ö&x=%3D%25#%25'
self.assert_strict_equal(urls.uri_to_iri(uri), iri)
self.assert_strict_equal(urls.iri_to_uri(urls.uri_to_iri(uri)), uri)
def test_ordered_multidict_encoding(self):
d = OrderedMultiDict()
d.add('foo', 1)
d.add('foo', 2)
d.add('foo', 3)
d.add('bar', 0)
d.add('foo', 4)
self.assert_equal(urls.url_encode(d), 'foo=1&foo=2&foo=3&bar=0&foo=4')
def test_href(self):
x = urls.Href('http://www.example.com/')
self.assert_strict_equal(x(u'foo'), 'http://www.example.com/foo')
self.assert_strict_equal(x.foo(u'bar'), 'http://www.example.com/foo/bar')
self.assert_strict_equal(x.foo(u'bar', x=42), 'http://www.example.com/foo/bar?x=42')
self.assert_strict_equal(x.foo(u'bar', class_=42), 'http://www.example.com/foo/bar?class=42')
self.assert_strict_equal(x.foo(u'bar', {u'class': 42}), 'http://www.example.com/foo/bar?class=42')
self.assert_raises(AttributeError, lambda: x.__blah__)
x = urls.Href('blah')
self.assert_strict_equal(x.foo(u'bar'), 'blah/foo/bar')
self.assert_raises(TypeError, x.foo, {u"foo": 23}, x=42)
x = urls.Href('')
self.assert_strict_equal(x('foo'), 'foo')
def test_href_url_join(self):
x = urls.Href(u'test')
self.assert_line_equal(x(u'foo:bar'), u'test/foo:bar')
self.assert_line_equal(x(u'http://example.com/'), u'test/http://example.com/')
self.assert_line_equal(x.a(), u'test/a')
def test_href_past_root(self):
base_href = urls.Href('http://www.blagga.com/1/2/3')
self.assert_strict_equal(base_href('../foo'), 'http://www.blagga.com/1/2/foo')
self.assert_strict_equal(base_href('../../foo'), 'http://www.blagga.com/1/foo')
self.assert_strict_equal(base_href('../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../../foo'), 'http://www.blagga.com/foo')
def test_url_unquote_plus_unicode(self):
# was broken in 0.6
self.assert_strict_equal(urls.url_unquote_plus(u'\x6d'), u'\x6d')
self.assert_is(type(urls.url_unquote_plus(u'\x6d')), text_type)
def test_quoting_of_local_urls(self):
rv = urls.iri_to_uri(u'/foo\x8f')
self.assert_strict_equal(rv, '/foo%C2%8F')
self.assert_is(type(rv), str)
def test_url_attributes(self):
rv = urls.url_parse('http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, 'http')
self.assert_strict_equal(rv.auth, 'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, 'foo%3a')
self.assert_strict_equal(rv.raw_password, 'bar%3a')
self.assert_strict_equal(rv.host, '::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, '/123')
self.assert_strict_equal(rv.query, 'x=y')
self.assert_strict_equal(rv.fragment, 'frag')
rv = urls.url_parse(u'http://\N{SNOWMAN}.com/')
self.assert_strict_equal(rv.host, u'\N{SNOWMAN}.com')
self.assert_strict_equal(rv.ascii_host, 'xn--n3h.com')
def test_url_attributes_bytes(self):
rv = urls.url_parse(b'http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, b'http')
self.assert_strict_equal(rv.auth, b'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, b'foo%3a')
self.assert_strict_equal(rv.raw_password, b'bar%3a')
self.assert_strict_equal(rv.host, b'::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, b'/123')
self.assert_strict_equal(rv.query, b'x=y')
self.assert_strict_equal(rv.fragment, b'frag')
def test_url_joining(self):
self.assert_strict_equal(urls.url_join('/foo', '/bar'), '/bar')
self.assert_strict_equal(urls.url_join('http://example.com/foo', '/bar'),
'http://example.com/bar')
self.assert_strict_equal(urls.url_join('file:///tmp/', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', '../../../x.html'),
'file:///x.html')
def test_partial_unencoded_decode(self):
ref = u'foo=정상처리'.encode('euc-kr')
x = urls.url_decode(ref, charset='euc-kr')
self.assert_strict_equal(x['foo'], u'정상처리')
def test_iri_to_uri_idempotence_ascii_only(self):
uri = u'http://www.idempoten.ce'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_iri_to_uri_idempotence_non_ascii(self):
uri = u'http://\N{SNOWMAN}/\N{SNOWMAN}'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_uri_to_iri_idempotence_ascii_only(self):
uri = 'http://www.idempoten.ce'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_uri_to_iri_idempotence_non_ascii(self):
uri = 'http://xn--n3h/%E2%98%83'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_iri_to_uri_to_iri(self):
iri = u'http://föö.com/'
uri = urls.iri_to_uri(iri)
self.assert_equal(urls.uri_to_iri(uri), iri)
def test_uri_to_iri_to_uri(self):
uri = 'http://xn--f-rgao.com/%C3%9E'
iri = urls.uri_to_iri(uri)
self.assert_equal(urls.iri_to_uri(iri), uri)
def test_uri_iri_normalization(self):
uri = 'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93'
iri = u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713'
tests = [
u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713',
u'http://xn--f-rgao.com/\u2610/fred?utf8=\N{CHECK MARK}',
b'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://föñ.com/\u2610/fred?utf8=%E2%9C%93',
b'http://xn--f-rgao.com/\xe2\x98\x90/fred?utf8=\xe2\x9c\x93',
]
for test in tests:
self.assert_equal(urls.uri_to_iri(test), iri)
self.assert_equal(urls.iri_to_uri(test), uri)
self.assert_equal(urls.uri_to_iri(urls.iri_to_uri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.uri_to_iri(test)), uri)
self.assert_equal(urls.uri_to_iri(urls.uri_to_iri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.iri_to_uri(test)), uri)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(URLsTestCase))
return suite
| apache-2.0 |
bohdan-shramko/learning-python | source/sublime-packages/Packages/mdpopups/st3/mdpopups/mdx/superfences.py | 2 | 21280 | """
Superfences.
pymdownx.superfences
Nested Fenced Code Blocks
This is a modification of the original Fenced Code Extension.
The algorithm has been rewritten to allow for fenced blocks in blockquotes,
lists, etc., and also to allow for special UML fences like `flow` for
flowcharts and `sequence` for sequence diagrams.
Modified: 2014 - 2017 Isaac Muse <[email protected]>
---
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from markdown.blockprocessors import CodeBlockProcessor
from markdown import util as md_util
from . import highlight as hl
from .util import PymdownxDeprecationWarning
import warnings
import re
NESTED_FENCE_START = r'''(?x)
(?:^(?P<ws>[\> ]*)(?P<fence>~{3,}|`{3,}))[ ]* # Fence opening
(\{? # Language opening
\.?(?P<lang>[\w#.+-]*))?[ ]* # Language
(?:
(hl_lines=(?P<quot>"|')(?P<hl_lines>\d+(?:[ ]+\d+)*)(?P=quot))?[ ]*| # highlight lines
(linenums=(?P<quot2>"|') # Line numbers
(?P<linestart>[\d]+) # Line number start
(?:[ ]+(?P<linestep>[\d]+))? # Line step
(?:[ ]+(?P<linespecial>[\d]+))? # Line special
(?P=quot2))?[ ]*
){,2}
}?[ ]*$ # Language closing
'''
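# Illustrative fence openings matched by NESTED_FENCE_START (hypothetical
# snippets, shown for reference only):
#   ```python
#   ~~~ {.python hl_lines="1 2"}
#   > ```sequence
#   ``` {.c linenums="10"}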
NESTED_FENCE_END = r'^[\> ]*%s[ ]*$'
WS = r'^([\> ]{0,%d})(.*)'
RE_FENCE = re.compile(
r'''(?xsm)
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # Opening
(\{?\.?(?P<lang>[\w#.+-]*))?[ ]* # Optional {, and lang
(?:
(hl_lines=(?P<quot>"|')(?P<hl_lines>\d+(?:[ ]+\d+)*)(?P=quot))?[ ]*| # Optional highlight lines option
(linenums=(?P<quot2>"|') # Line numbers
(?P<linestart>[\d]+) # Line number start
(?:[ ]+(?P<linestep>[\d]+))? # Line step
(?:[ ]+(?P<linespecial>[\d]+))? # Line special
(?P=quot2))?[ ]*
){,2}
}?[ ]*\n # Optional closing }
(?P<code>.*?)(?<=\n) # Code
(?P=fence)[ ]*$ # Closing
'''
)
def _escape(txt):
"""Basic html escaping."""
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
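# e.g. _escape('<b class="x">&</b>') returns
# '&lt;b class=&quot;x&quot;&gt;&amp;&lt;/b&gt;'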
class CodeStash(object):
"""
Stash code for later retrieval.
Store original fenced code here in case we were
too greedy and need to restore in an indented code
block.
"""
def __init__(self):
"""Initialize."""
self.stash = {}
def __len__(self): # pragma: no cover
"""Length of stash."""
return len(self.stash)
def get(self, key, default=None):
"""Get the code from the key."""
code = self.stash.get(key, default)
return code
def remove(self, key):
"""Remove the stashed code."""
del self.stash[key]
def store(self, key, code, indent_level):
"""Store the code in the stash."""
self.stash[key] = (code, indent_level)
def clear_stash(self):
"""Clear the stash."""
self.stash = {}
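# Sketch of the stash contract (hypothetical placeholder key):
# stash = CodeStash()
# stash.store('key-0', '```python\nprint(1)\n```', 4)
# code, indent_level = stash.get('key-0')
# stash.remove('key-0')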
def fence_code_format(source, language, css_class):
"""Format source as code blocks."""
return '<pre class="%s"><code>%s</code></pre>' % (css_class, _escape(source))
def fence_div_format(source, language, css_class):
"""Format source as div."""
return '<div class="%s">%s</div>' % (css_class, _escape(source))
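# Usage sketch for custom fences (assumed consumer code, not part of this
# module; the extension name string may differ for this vendored copy):
# import markdown
# html = markdown.markdown(
#     text,
#     extensions=['pymdownx.superfences'],
#     extension_configs={
#         'pymdownx.superfences': {
#             'custom_fences': [
#                 {'name': 'math', 'class': 'arithmatex', 'format': fence_div_format}
#             ]
#         }
#     }
# )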
class SuperFencesCodeExtension(Extension):
"""Superfences code block extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.superfences = []
self.config = {
'disable_indented_code_blocks': [False, "Disable indented code blocks - Default: False"],
'uml_flow': [True, "Enable flowcharts - Default: True"],
'uml_sequence': [True, "Enable sequence diagrams - Default: True"],
'custom_fences': [
[
{'name': 'flow', 'class': 'uml-flowchart'},
{'name': 'sequence', 'class': 'uml-sequence-diagram'}
],
'Specify custom fences. Default: See documentation.'
],
'highlight_code': [True, "Highlight code - Default: True"],
'use_codehilite_settings': [
None,
"Deprecatd and does nothing. "
"- Default: None"
],
'css_class': [
'',
"Set class name for wrapper element. The default of CodeHilite or Highlight will be used"
"if nothing is set. - "
"Default: ''"
]
}
super(SuperFencesCodeExtension, self).__init__(*args, **kwargs)
def extend_super_fences(self, name, formatter):
"""Extend superfences with the given name, language, and formatter."""
self.superfences.append(
{
"name": name,
"test": lambda l, language=name: language == l,
"formatter": formatter
}
)
def extendMarkdown(self, md, md_globals):
"""Add FencedBlockPreprocessor to the Markdown instance."""
# Not super yet, so let's make it super
md.registerExtension(self)
config = self.getConfigs()
# Default fenced blocks
self.superfences.insert(
0,
{
"name": "superfences",
"test": lambda language: True,
"formatter": None
}
)
if config.get('use_codehilite_settings'):  # pragma: no cover
warnings.warn(
"'use_codehilite_settings' is deprecated and does nothing.\n"
"\nCodeHilite settings will only be used if CodeHilite is configured\n"
" and 'pymdownx.highlight' is not configured.\n"
"Please discontinue use of this setting as it will be removed in the future.",
PymdownxDeprecationWarning
)
# UML blocks
custom_fences = config.get('custom_fences', [])
for custom in custom_fences:
name = custom.get('name')
class_name = custom.get('class')
fence_format = custom.get('format', fence_code_format)
if name is not None and class_name is not None:
self.extend_super_fences(
name,
lambda s, l, c=class_name, f=fence_format: f(s, l, c)
)
self.markdown = md
self.patch_fenced_rule()
for entry in self.superfences:
entry["stash"] = CodeStash()
def patch_fenced_rule(self):
"""
Patch Python Markdown with our own fenced block extension.
We don't attempt to protect against a user loading the `fenced_code` extension with this.
Most likely they will have issues, but they shouldn't have loaded them together in the first place :).
"""
config = self.getConfigs()
fenced = SuperFencesBlockPreprocessor(self.markdown)
indented_code = SuperFencesCodeBlockProcessor(self)
fenced.config = config
fenced.extension = self
indented_code.config = config
indented_code.markdown = self.markdown
indented_code.extension = self
self.superfences[0]["formatter"] = fenced.highlight
self.markdown.parser.blockprocessors['code'] = indented_code
self.markdown.preprocessors.add('fenced_code_block', fenced, ">normalize_whitespace")
def reset(self):
"""Clear the stash."""
for entry in self.superfences:
entry["stash"].clear_stash()
class SuperFencesBlockPreprocessor(Preprocessor):
"""
Preprocessor to find fenced code blocks.
Because this is done as a preprocessor, it might be too greedy.
We will stash the blocks code and restore if we mistakenly processed
text from an indented code block.
"""
fence_start = re.compile(NESTED_FENCE_START)
CODE_WRAP = '<pre%s><code%s>%s</code></pre>'
def __init__(self, md):
"""Initialize."""
super(SuperFencesBlockPreprocessor, self).__init__(md)
self.markdown = md
self.checked_hl_settings = False
self.codehilite_conf = {}
def rebuild_block(self, lines):
"""Deindent the fenced block lines."""
return '\n'.join([line[self.ws_len:] for line in lines])
def get_hl_settings(self):
"""Check for code hilite extension to get its config."""
if not self.checked_hl_settings:
self.checked_hl_settings = True
self.highlight_code = self.config['highlight_code']
config = hl.get_hl_settings(self.markdown)
css_class = self.config['css_class']
self.css_class = css_class if css_class else config['css_class']
self.extend_pygments_lang = config.get('extend_pygments_lang', None)
self.guess_lang = config['guess_lang']
self.pygments_style = config['pygments_style']
self.use_pygments = config['use_pygments']
self.noclasses = config['noclasses']
self.linenums = config['linenums']
def clear(self):
"""Reset the class variables."""
self.ws = None
self.ws_len = 0
self.fence = None
self.lang = None
self.hl_lines = None
self.linestart = None
self.linestep = None
self.linespecial = None
self.quote_level = 0
self.code = []
self.empty_lines = 0
self.whitespace = None
self.fence_end = None
def eval(self, m, start, end):
"""Evaluate a normal fence."""
if m.group(0).strip() == '':
# Empty line is okay
self.empty_lines += 1
self.code.append(m.group(0))
elif len(m.group(1)) != self.ws_len and m.group(2) != '':
# Indentation does not match the opening fence
self.clear()
elif self.fence_end.match(m.group(0)) is not None and not m.group(2).startswith(' '):
# End of fence
self.process_nested_block(m, start, end)
else:
# Content line
self.empty_lines = 0
self.code.append(m.group(0))
def eval_quoted(self, m, quote_level, start, end):
"""Evaluate fence inside a blockquote."""
if quote_level > self.quote_level:
# Quote level exceeds the starting quote level
self.clear()
elif quote_level <= self.quote_level:
if m.group(2) == '':
# Empty line is okay
self.code.append(m.group(0))
self.empty_lines += 1
elif len(m.group(1)) < self.ws_len:
# Not indented enough
self.clear()
elif self.empty_lines and quote_level < self.quote_level:
# Quote levels don't match and we are signified
# the end of the block with an empty line
self.clear()
elif self.fence_end.match(m.group(0)) is not None:
# End of fence
self.process_nested_block(m, start, end)
else:
# Content line
self.empty_lines = 0
self.code.append(m.group(0))
def process_nested_block(self, m, start, end):
"""Process the contents of the nested block."""
self.last = m.group(0)
code = None
for entry in reversed(self.extension.superfences):
if entry["test"](self.lang):
code = entry["formatter"](self.rebuild_block(self.code), self.lang)
break
if code is not None:
self._store('\n'.join(self.code) + '\n', code, start, end, entry)
self.clear()
def parse_hl_lines(self, hl_lines):
"""Parse the lines to highlight."""
return list(map(int, hl_lines.strip().split())) if hl_lines else []
def parse_line_start(self, linestart):
"""Parse line start."""
return int(linestart) if linestart else -1
def parse_line_step(self, linestep):
"""Parse line start."""
step = int(linestep) if linestep else -1
return step if step > 1 else -1
def parse_line_special(self, linespecial):
"""Parse line start."""
return int(linespecial) if linespecial else -1
def search_nested(self, lines):
"""Search for nested fenced blocks."""
count = 0
for line in lines:
if self.fence is None:
# Found the start of a fenced block.
m = self.fence_start.match(line)
if m is not None:
start = count
self.first = m.group(0)
self.ws = m.group('ws') if m.group('ws') else ''
self.ws_len = len(self.ws)
self.quote_level = self.ws.count(">")
self.empty_lines = 0
self.fence = m.group('fence')
self.lang = m.group('lang')
self.hl_lines = m.group('hl_lines')
self.linestart = m.group('linestart')
self.linestep = m.group('linestep')
self.linespecial = m.group('linespecial')
self.fence_end = re.compile(NESTED_FENCE_END % self.fence)
self.whitespace = re.compile(WS % self.ws_len)
else:
# Evaluate lines
# - Determine if it is the ending line or content line
# - If it is a content line, make sure it is all indented
# with the opening and closing lines (lines with just
# whitespace will be stripped so those don't matter).
# - When content lines are inside blockquotes, make sure
# the nested block quote levels make sense according to
# blockquote rules.
m = self.whitespace.match(line)
if m:
end = count + 1
quote_level = m.group(1).count(">")
if self.quote_level:
# Handle blockquotes
self.eval_quoted(m, quote_level, start, end)
elif quote_level == 0:
# Handle all other cases
self.eval(m, start, end)
else:
# Looks like we got a blockquote line
# when not in a blockquote.
self.clear()
else: # pragma: no cover
# I am 99.9999% sure we will never hit this line.
# But I am too chicken to pull it out :).
self.clear()
count += 1
# Now that we are done iterating the lines,
# let's replace the original content with the
# fenced blocks.
while len(self.stack):
fenced, start, end = self.stack.pop()
lines = lines[:start] + [fenced] + lines[end:]
return lines
def highlight(self, src, language):
"""
Syntax highlight the code block.
If config is not empty, then the codehlite extension
is enabled, so we call into it to highlight the code.
"""
if self.highlight_code:
linestep = self.parse_line_step(self.linestep)
linestart = self.parse_line_start(self.linestart)
linespecial = self.parse_line_special(self.linespecial)
hl_lines = self.parse_hl_lines(self.hl_lines)
el = hl.Highlight(
guess_lang=self.guess_lang,
pygments_style=self.pygments_style,
use_pygments=self.use_pygments,
noclasses=self.noclasses,
linenums=self.linenums,
extend_pygments_lang=self.extend_pygments_lang
).highlight(
src,
language,
self.css_class,
hl_lines=hl_lines,
linestart=linestart,
linestep=linestep,
linespecial=linespecial
)
else:
# Format as a code block.
el = self.CODE_WRAP % ('', '', _escape(src))
return el
def _store(self, source, code, start, end, obj):
"""
Store the fenced blocks in the stack to be replaced when done iterating.
Store the original text in case we need to restore if we are too greedy.
"""
# Save the fenced blocks to add once we are done iterating the lines
placeholder = self.markdown.htmlStash.store(code, safe=True)
self.stack.append(('%s%s' % (self.ws, placeholder), start, end))
if not self.disabled_indented:
# If an indented block consumes this placeholder,
# we can restore the original source
obj["stash"].store(
placeholder[1:-1],
"%s\n%s%s" % (self.first, source, self.last),
self.ws_len
)
def run(self, lines):
"""Search for fenced blocks."""
self.get_hl_settings()
self.clear()
self.stack = []
self.disabled_indented = self.config.get("disable_indented_code_blocks", False)
lines = self.search_nested(lines)
return lines
class SuperFencesCodeBlockProcessor(CodeBlockProcessor):
"""Process idented code blocks to see if we accidentaly processed its content as a fenced block."""
FENCED_BLOCK_RE = re.compile(
r'^([\> ]*)%s(%s)%s$' % (
md_util.HTML_PLACEHOLDER[0],
md_util.HTML_PLACEHOLDER[1:-1] % r'([0-9]+)',
md_util.HTML_PLACEHOLDER[-1]
)
)
def test(self, parent, block):
"""Test method that is one day to be deprecated."""
return True
def reindent(self, text, pos, level):
"""Reindent the code to where it is supposed to be."""
indented = []
for line in text.split('\n'):
index = pos - level
indented.append(line[index:])
return '\n'.join(indented)
def revert_greedy_fences(self, block):
"""Revert a prematurely converted fenced block."""
new_block = []
for line in block.split('\n'):
m = self.FENCED_BLOCK_RE.match(line)
if m:
key = m.group(2)
indent_level = len(m.group(1))
original = None
for entry in self.extension.superfences:
stash = entry["stash"]
original, pos = stash.get(key)
if original is not None:
code = self.reindent(original, pos, indent_level)
new_block.append(code)
stash.remove(key)
break
if original is None: # pragma: no cover
# Too much work to test this. This is just a fall back in case
# we find a placeholder, and we went to revert it and it wasn't in our stash.
# Most likely this would be caused by someone else. We just want to put it
# back in the block if we can't revert it. Maybe we can do a more directed
# unit test in the future.
new_block.append(line)
else:
new_block.append(line)
return '\n'.join(new_block)
def run(self, parent, blocks):
"""Look for and parse code block."""
handled = False
if not self.config.get("disable_indented_code_blocks", False):
handled = CodeBlockProcessor.test(self, parent, blocks[0])
if handled:
if self.config.get("nested", True):
blocks[0] = self.revert_greedy_fences(blocks[0])
handled = CodeBlockProcessor.run(self, parent, blocks) is not False
return handled
def makeExtension(*args, **kwargs):
"""Return extension."""
return SuperFencesCodeExtension(*args, **kwargs)
| mit |
dneiter/exabgp | lib/exabgp/reactor/api/command/text.py | 2 | 15907 | # encoding: utf-8
"""
command.py
Created by Thomas Mangin on 2015-12-15.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.version import version as _version
class Text (object):
callback = {}
def __new__ (cls,name):
def register (function):
cls.callback[name] = function
return function
return register
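# Registration sketch: Text('name') returns a decorator that records the
# decorated function in Text.callback under that name (hypothetical command):
#
# @Text('hello')
# def hello (self, reactor, service, command):
#     reactor.answer(service,'hello world')
#     return True
#
# Text.callback['hello'] now refers to the hello function.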
@Text('shutdown')
def shutdown (self, reactor, service, command):
reactor.answer(service,'shutdown in progress')
return reactor.api.shutdown()
@Text('reload')
def reload (self, reactor, service, command):
reactor.answer(service,'reload in progress')
return reactor.api.reload()
@Text('restart')
def restart (self, reactor, service, command):
reactor.answer(service,'restart in progress')
return reactor.api.restart()
@Text('version')
def version (self, reactor, service, command):
reactor.answer(service,'exabgp %s\n' % _version)
return True
@Text('teardown')
def teardown (self, reactor, service, command):
try:
descriptions,command = self.parser.extract_neighbors(command)
_,code = command.split(' ',1)
for key in reactor.peers:
for description in descriptions:
if reactor.match_neighbor(description,key):
reactor.peers[key].teardown(int(code))
self.logger.reactor('teardown scheduled for %s' % ' '.join(description))
return True
except ValueError:
return False
except IndexError:
return False
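# Command shape sketch (hypothetical peer and code): a line such as
# "neighbor 10.0.0.1 teardown 15" schedules a session teardown with
# notification code 15 on every configured peer matching the description.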
@Text('show neighbor')
def show_neighbor (self, reactor, service, command):
def callback ():
for key in reactor.configuration.neighbor.neighbors.keys():
neighbor = reactor.configuration.neighbor.neighbors[key]
for line in str(neighbor).split('\n'):
reactor.answer(service,line)
yield True
reactor.plan(callback(),'show_neighbor')
return True
@Text('show neighbors')
def show_neighbors (self, reactor, service, command):
def callback ():
for key in reactor.configuration.neighbor.neighbors.keys():
neighbor = reactor.configuration.neighbor.neighbors[key]
for line in str(neighbor).split('\n'):
reactor.answer(service,line)
yield True
reactor.plan(callback(),'show_neighbors')
return True
@Text('show routes')
def show_routes (self, reactor, service, command):
def callback ():
last = command.split()[-1]
if last == 'routes':
neighbors = reactor.configuration.neighbor.neighbors.keys()
else:
neighbors = [n for n in reactor.configuration.neighbor.neighbors.keys() if 'neighbor %s' % last in n]
for key in neighbors:
neighbor = reactor.configuration.neighbor.neighbors[key]
for change in list(neighbor.rib.outgoing.sent_changes()):
reactor.answer(service,'neighbor %s %s' % (neighbor.local_address,str(change.nlri)))
yield True
reactor.plan(callback(),'show_routes')
return True
@Text('show routes extensive')
def show_routes_extensive (self, reactor, service, command):
def callback ():
last = command.split()[-1]
if last == 'extensive':
neighbors = reactor.configuration.neighbor.neighbors.keys()
else:
neighbors = [n for n in reactor.configuration.neighbor.neighbors.keys() if 'neighbor %s' % last in n]
for key in neighbors:
neighbor = reactor.configuration.neighbor.neighbors[key]
for change in list(neighbor.rib.outgoing.sent_changes()):
reactor.answer(service,'neighbor %s %s' % (neighbor.name(),change.extensive()))
yield True
reactor.plan(callback(),'show_routes_extensive')
return True
@Text('announce watchdog')
def announce_watchdog (self, reactor, service, command):
def callback (name):
# XXX: move into Action
for neighbor in reactor.configuration.neighbor.neighbors:
reactor.configuration.neighbor.neighbors[neighbor].rib.outgoing.announce_watchdog(name)
yield False
reactor.route_update = True
try:
name = command.split(' ')[2]
except IndexError:
name = service
reactor.plan(callback(name),'announce_watchdog')
return True
@Text('withdraw watchdog')
def withdraw_watchdog (self, reactor, service, command):
def callback (name):
# XXX: move into Action
for neighbor in reactor.configuration.neighbor.neighbors:
reactor.configuration.neighbor.neighbors[neighbor].rib.outgoing.withdraw_watchdog(name)
yield False
reactor.route_update = True
try:
name = command.split(' ')[2]
except IndexError:
name = service
reactor.plan(callback(name),'withdraw_watchdog')
return True
@Text('flush route')
def flush_route (self, reactor, service, command):
def callback (self, peers):
self.logger.reactor("Flushing routes for %s" % ', '.join(peers if peers else []) if peers is not None else 'all peers')
yield True
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,peers),'flush_route')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce route')
def announce_route (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_route(command,nexthops,'announce')
if not changes:
self.logger.reactor("Command could not parse route in : %s" % command,'warning')
yield True
else:
peers = []
for (peer,change) in changes:
peers.append(peer)
reactor.api.change_to_peers(change,[peer,])
self.logger.reactor("Route added to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',change.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'announce_route')
return True
except ValueError:
return False
except IndexError:
return False
@Text('withdraw route')
def withdraw_route (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_route(command,nexthops,'withdraw')
if not changes:
self.logger.reactor("Command could not parse route in : %s" % command,'warning')
yield True
else:
for (peer,change) in changes:
if reactor.api.change_to_peers(change,[peer,]):
self.logger.reactor("Route removed : %s" % change.extensive())
yield False
else:
self.logger.reactor("Could not find therefore remove route : %s" % change.extensive(),'warning')
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'withdraw_route')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce vpls')
def announce_vpls (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_vpls(command,nexthops,'announce')
if not changes:
self.logger.reactor("Command could not parse vpls in : %s" % command,'warning')
yield True
else:
peers = []
for (peer,change) in changes:
peers.append(peer)
reactor.api.change_to_peers(change,[peer,])
self.logger.reactor("vpls added to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',change.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'announce_vpls')
return True
except ValueError:
return False
except IndexError:
return False
@Text('withdraw vpls')
def withdraw_vpls (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_vpls(command,nexthops,'withdraw')
if not changes:
self.logger.reactor("Command could not parse vpls in : %s" % command,'warning')
yield True
else:
for (peer,change) in changes:
if reactor.api.change_to_peers(change,[peer,]):
self.logger.reactor("vpls removed : %s" % change.extensive())
yield False
else:
self.logger.reactor("Could not find therefore remove vpls : %s" % change.extensive(),'warning')
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'withdraw_vpls')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce attribute')
def announce_attribute (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_attribute(command,nexthops,'announce')
if not changes:
self.logger.reactor("Command could not parse attribute in : %s" % command,'warning')
yield True
else:
for (peers,change) in changes:
reactor.api.change_to_peers(change,peers)
self.logger.reactor("Route added to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',change.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'announce_attribute')
return True
except ValueError:
return False
except IndexError:
return False
@Text('withdraw attribute')
def withdraw_attribute (self, reactor, service, command):
def callback (self, command, nexthops):
changes = self.parser.api_attribute(command,nexthops,'withdraw')
if not changes:
self.logger.reactor("Command could not parse attribute in : %s" % command,'warning')
yield True
else:
for (peers,change) in changes:
if reactor.api.change_to_peers(change,peers):
self.logger.reactor("Route removed : %s" % change.extensive())
yield False
else:
self.logger.reactor("Could not find therefore remove route : %s" % change.extensive(),'warning')
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,reactor.nexthops(peers)),'withdraw_attribute')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce flow')
def announce_flow (self, reactor, service, command):
def callback (self, command, peers):
changes = self.parser.api_flow(command,'announce')
if not changes:
self.logger.reactor("Command could not parse flow in : %s" % command)
yield True
else:
for change in changes:
reactor.api.change_to_peers(change,peers)
self.logger.reactor("Flow added to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',change.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,peers),'announce_flow')
return True
except ValueError:
return False
except IndexError:
return False
@Text('withdraw flow')
def withdraw_flow (self, reactor, service, command):
def callback (self, command, peers):
changes = self.parser.api_flow(command,'withdraw')
if not changes:
self.logger.reactor("Command could not parse flow in : %s" % command)
yield True
else:
for change in changes:
if reactor.api.change_to_peers(change,peers):
self.logger.reactor("Flow found and removed : %s" % change.extensive())
yield False
else:
self.logger.reactor("Could not find therefore remove flow : %s" % change.extensive(),'warning')
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,peers),'withdraw_flow')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce eor')
def announce_eor (self, reactor, service, command):
def callback (self, command, peers):
family = self.parser.api_eor(command)
if not family:
self.logger.reactor("Command could not parse eor : %s" % command)
yield True
else:
reactor.api.eor_to_peers(family,peers)
self.logger.reactor("Sent to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',family.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,peers),'announce_eor')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce route-refresh')
def announce_refresh (self, reactor, service, command):
def callback (self, command, peers):
refresh = self.parser.api_refresh(command)
if not refresh:
self.logger.reactor("Command could not parse flow in : %s" % command)
yield True
else:
reactor.api.refresh_to_peers(refresh,peers)
self.logger.reactor("Sent to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',refresh.extensive()))
yield False
reactor.route_update = True
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,peers),'announce_refresh')
return True
except ValueError:
return False
except IndexError:
return False
@Text('announce operational')
def announce_operational (self, reactor, service, command):
def callback (self, command, peers):
operational = self.parser.api_operational(command)
if not operational:
self.logger.reactor("Command could not parse operational command : %s" % command)
yield True
else:
reactor.api.operational_to_peers(operational,peers)
self.logger.reactor("operational message sent to %s : %s" % (
', '.join(peers if peers else []) if peers is not None else 'all peers',operational.extensive()
)
)
yield False
reactor.route_update = True
if (command.split() + ['be','safe'])[2].lower() not in ('asm','adm','rpcq','rpcp','apcq','apcp','lpcq','lpcp'):
return False
try:
descriptions,command = self.parser.extract_neighbors(command)
peers = reactor.match_neighbors(descriptions)
if not peers:
self.logger.reactor('no neighbor matching the command : %s' % command,'warning')
return False
reactor.plan(callback(self,command,peers),'announce_operational')
return True
except ValueError:
return False
except IndexError:
return False
| bsd-3-clause |
redhat-openstack/django | django/contrib/contenttypes/views.py | 115 | 3383 | from __future__ import unicode_literals
from django import http
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
{'ct_id': content_type_id})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
{'ct_name': content_type.name})
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if Site._meta.installed:
opts = obj._meta
# First, look for an many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = get_current_site(request).domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = 'https' if request.is_secure() else 'http'
return http.HttpResponseRedirect('%s://%s%s'
% (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
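# A minimal wiring sketch (assumed project URLconf, not part of this module):
# from django.conf.urls import url
# from django.contrib.contenttypes.views import shortcut
#
# urlpatterns += [
#     url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', shortcut),
# ]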
| bsd-3-clause |
bdang2012/taiga-back | taiga/projects/attachments/api.py | 5 | 3684 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path as path
import mimetypes
mimetypes.init()
from django.utils.translation import ugettext as _
from django.contrib.contenttypes.models import ContentType
from taiga.base import filters
from taiga.base import exceptions as exc
from taiga.base.api import ModelCrudViewSet
from taiga.base.api.utils import get_object_or_404
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from . import permissions
from . import serializers
from . import models
class BaseAttachmentViewSet(HistoryResourceMixin, WatchedResourceMixin, ModelCrudViewSet):
model = models.Attachment
serializer_class = serializers.AttachmentSerializer
filter_fields = ["project", "object_id"]
content_type = None
def update(self, *args, **kwargs):
partial = kwargs.get("partial", False)
if not partial:
            raise exc.NotSupported(_("Non-partial updates are not supported"))
return super().update(*args, **kwargs)
def get_content_type(self):
app_name, model = self.content_type.split(".", 1)
return get_object_or_404(ContentType, app_label=app_name, model=model)
def pre_save(self, obj):
if not obj.id:
obj.content_type = self.get_content_type()
obj.owner = self.request.user
obj.size = obj.attached_file.size
obj.name = path.basename(obj.attached_file.name).lower()
if obj.project_id != obj.content_object.project_id:
            raise exc.WrongArguments(_("Project ID does not match between object and project"))
super().pre_save(obj)
def post_delete(self, obj):
        # NOTE: when an attachment is destroyed, the content_object changes
        # after the deletion and not before, so the history snapshot must be
        # persisted here.
self.persist_history_snapshot(obj, delete=True)
        super().post_delete(obj)
def get_object_for_snapshot(self, obj):
return obj.content_object
class UserStoryAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.UserStoryAttachmentPermission,)
filter_backends = (filters.CanViewUserStoryAttachmentFilterBackend,)
content_type = "userstories.userstory"
class IssueAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.IssueAttachmentPermission,)
filter_backends = (filters.CanViewIssueAttachmentFilterBackend,)
content_type = "issues.issue"
class TaskAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.TaskAttachmentPermission,)
filter_backends = (filters.CanViewTaskAttachmentFilterBackend,)
content_type = "tasks.task"
class WikiAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.WikiAttachmentPermission,)
filter_backends = (filters.CanViewWikiAttachmentFilterBackend,)
content_type = "wiki.wikipage"
| agpl-3.0 |
bhatfield/titanium_mobile | support/common/markdown/inlinepatterns.py | 107 | 12185 | """
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)$'. For the built-in expressions,
Pattern takes care of adding the "^(.*)" and "(.*)$" itself.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <[email protected]>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
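# Worked example (editor's note): with ENABLE_ATTRIBUTES on, alt text such as
# "fish {@id=main}" makes handleAttributes() copy id="main" onto the parent
# element via the ATTR_RE callback before the remaining text is used.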
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
        Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
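    # Editor's note: because the supplied pattern is wrapped in
    # "^(.*?)%s(.*?)$", group numbering shifts by one -- m.group(1) is the
    # text before the match and the pattern's own first group becomes
    # m.group(2), which is why the subclasses below read group(2)/group(3).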
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
title = dequote(title) #.replace('"', """)
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
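    # Worked example (editor's note): in safe mode
    # sanitize_url('javascript:alert(1)') returns '' (no netloc and the
    # scheme is not in locless_schemes), while
    # sanitize_url('mailto:me@example.com') is returned unchanged.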
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
        if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<[email protected]>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
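# Worked example (editor's note): for <me@x.org> every character of the link
# text is emitted as a named or numeric HTML entity and the mailto: href as
# numeric entities (e.g. 'm' becomes AMP_SUBSTITUTE + '#109;'), which
# obfuscates the address from naive scrapers while browsers render it normally.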
| apache-2.0 |
lochiiconnectivity/boto | boto/sdb/db/blob.py | 57 | 2398 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Blob(object):
"""Blob object"""
def __init__(self, value=None, file=None, id=None):
self._file = file
self.id = id
self.value = value
@property
def file(self):
from StringIO import StringIO
if self._file:
f = self._file
else:
f = StringIO(self.value)
return f
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if hasattr(self.file, "get_contents_as_string"):
value = self.file.get_contents_as_string()
else:
value = self.file.getvalue()
if isinstance(value, unicode):
return value
else:
return value.decode('utf-8')
def read(self):
if hasattr(self.file, "get_contents_as_string"):
return self.file.get_contents_as_string()
else:
return self.file.read()
def readline(self):
return self.file.readline()
def next(self):
return self.file.next()
def __iter__(self):
return iter(self.file)
@property
def size(self):
if self._file:
return self._file.size
elif self.value:
return len(self.value)
else:
return 0
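# Editor's note -- an illustrative usage sketch, not part of this module. The
# in-memory case below is exact; S3-backed behaviour assumes a boto Key object
# is passed as the `file` argument:
#
#     b = Blob(value='hello world')
#     print b.size        # -> 11
#     print unicode(b)    # -> u'hello world'
#     data = b.read()     # falls back to StringIO when no file is attached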
| mit |
richardcs/ansible | lib/ansible/modules/network/cloudengine/ce_switchport.py | 7 | 27424 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_switchport
version_added: "2.4"
short_description: Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
description:
- Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- When C(state=absent), VLANs can be added/removed from trunk links and
      the existing access VLAN can be 'unconfigured', leaving just VLAN 1
on that interface.
    - When working with trunk VLANs, the keywords add/remove are always sent
in the C(port trunk allow-pass vlan) command. Use verbose mode to see
commands sent.
    - When C(state=unconfigured), the interface will be reset to the default
      Layer 2 configuration, i.e. vlan 1 in access mode.
options:
interface:
description:
- Full name of the interface, i.e. 40GE1/0/22.
required: true
mode:
description:
- The link type of an interface.
choices: ['access','trunk']
access_vlan:
description:
- If C(mode=access), used as the access VLAN ID, in the range from 1 to 4094.
native_vlan:
description:
- If C(mode=trunk), used as the trunk native VLAN ID, in the range from 1 to 4094.
trunk_vlans:
description:
- If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk, such as 2-10 or 2,5,10-15, etc.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present', 'absent', 'unconfigured']
'''
EXAMPLES = '''
- name: switchport module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure 10GE1/0/22 is in its default switchport state
ce_switchport:
interface: 10GE1/0/22
state: unconfigured
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is configured for access vlan 20
ce_switchport:
interface: 10GE1/0/22
mode: access
access_vlan: 20
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 only has vlans 5-10 as trunk vlans
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
provider: '{{ cli }}'
- name: Ensure these VLANs are not being tagged on the trunk
ce_switchport:
interface: 10GE1/0/22
mode: trunk
trunk_vlans: 51-4000
state: absent
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22", "mode": "access"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {"access_vlan": "10", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["10GE1/0/22", "port default vlan 20"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_GET_PORT_ATTR = """
<filter type="subtree">
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf>
<ifName>%s</ifName>
<l2Enable></l2Enable>
<l2Attribute>
<linkType></linkType>
<pvid></pvid>
<trunkVlans></trunkVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</filter>
"""
CE_NC_SET_ACCESS_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>%s</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
CE_NC_SET_TRUNK_PORT_MODE = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_PVID = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<pvid>%s</pvid>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_VLANS = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<trunkVlans>%s:%s</trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_DEFAULT_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>1</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
SWITCH_PORT_TYPE = ('ge', '10ge', '25ge',
'4x10ge', '40ge', '100ge', 'eth-trunk')
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
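# Worked examples (editor's note):
#   get_interface_type('40GE1/0/22')   -> '40ge'
#   get_interface_type('Eth-Trunk100') -> 'eth-trunk'
#   get_interface_type('Vlanif10')     -> 'vlanif' (not a switch port type)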
def is_portswitch_enabled(iftype):
    """check whether the interface type supports [undo] portswitch"""
    return bool(iftype in SWITCH_PORT_TYPE)
def vlan_bitmap_undo(bitmap):
"""convert vlan bitmap to undo bitmap"""
vlan_bit = ['F'] * 1024
if not bitmap or len(bitmap) == 0:
return ''.join(vlan_bit)
bit_len = len(bitmap)
for num in range(bit_len):
undo = (~int(bitmap[num], 16)) & 0xF
vlan_bit[num] = hex(undo)[2]
return ''.join(vlan_bit)
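# Worked example (editor's note): each hex nibble covers four vlan ids and is
# simply inverted (~n & 0xF), so an input bitmap beginning '6' (vlans 1 and 2
# set) yields an undo bitmap beginning '9', and all-zero nibbles become 'f'.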
def is_vlan_bitmap_empty(bitmap):
"""check vlan bitmap empty"""
if not bitmap or len(bitmap) == 0:
return True
bit_len = len(bitmap)
for num in range(bit_len):
if bitmap[num] != '0':
return False
return True
class SwitchPort(object):
"""
Manages Layer 2 switchport interfaces.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface and vlan info
self.interface = self.module.params['interface']
self.mode = self.module.params['mode']
self.state = self.module.params['state']
self.access_vlan = self.module.params['access_vlan']
self.native_vlan = self.module.params['native_vlan']
self.trunk_vlans = self.module.params['trunk_vlans']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
self.intf_info = dict() # interface vlan info
self.intf_type = None # loopback tunnel ...
def init_module(self):
""" init module """
required_if = [('state', 'absent', ['mode']), ('state', 'present', ['mode'])]
self.module = AnsibleModule(
argument_spec=self.spec, required_if=required_if, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_interface_dict(self, ifname):
""" get one interface attributes dict."""
intf_info = dict()
conf_str = CE_NC_GET_PORT_ATTR % ifname
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return intf_info
intf = re.findall(
r'.*<ifName>(.*)</ifName>.*\s*<l2Enable>(.*)</l2Enable>.*', rcv_xml)
if intf:
intf_info = dict(ifName=intf[0][0],
l2Enable=intf[0][1],
linkType="",
pvid="",
trunkVlans="")
if intf_info["l2Enable"] == "enable":
attr = re.findall(
r'.*<linkType>(.*)</linkType>.*.*\s*<pvid>(.*)'
r'</pvid>.*\s*<trunkVlans>(.*)</trunkVlans>.*', rcv_xml)
if attr:
intf_info["linkType"] = attr[0][0]
intf_info["pvid"] = attr[0][1]
intf_info["trunkVlans"] = attr[0][2]
return intf_info
def is_l2switchport(self):
"""Check layer2 switch port"""
return bool(self.intf_info["l2Enable"] == "enable")
def merge_access_vlan(self, ifname, access_vlan):
"""Merge access interface vlan"""
change = False
conf_str = ""
self.updates_cmd.append("interface %s" % ifname)
if self.state == "present":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] != access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
change = True
else: # not access
self.updates_cmd.append("port link-type access")
if access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
else:
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
elif self.state == "absent":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] == access_vlan and access_vlan != "1":
self.updates_cmd.append(
"undo port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
else: # not access
self.updates_cmd.append("port link-type access")
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
if not change:
self.updates_cmd.pop() # remove interface
return
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_ACCESS_PORT")
self.changed = True
def merge_trunk_vlan(self, ifname, native_vlan, trunk_vlans):
"""Merge trunk interface vlan"""
change = False
xmlstr = ""
self.updates_cmd.append("interface %s" % ifname)
if trunk_vlans:
vlan_list = self.vlan_range_to_list(trunk_vlans)
vlan_map = self.vlan_list_to_bitmap(vlan_list)
if self.state == "present":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] != native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
change = True
if trunk_vlans:
add_vlans = self.vlan_bitmap_add(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(add_vlans):
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, add_vlans, add_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
change = True
if native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
if trunk_vlans:
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, vlan_map, vlan_map)
if not native_vlan and not trunk_vlans:
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
self.updates_cmd.append(
"undo port trunk allow-pass vlan 1")
elif self.state == "absent":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] == native_vlan and native_vlan != '1':
self.updates_cmd.append(
"undo port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, 1)
change = True
if trunk_vlans:
del_vlans = self.vlan_bitmap_del(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(del_vlans):
self.updates_cmd.append(
"undo port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
undo_map = vlan_bitmap_undo(del_vlans)
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, undo_map, del_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
self.updates_cmd.append("undo port trunk allow-pass vlan 1")
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
change = True
if not change:
self.updates_cmd.pop()
return
conf_str = "<config>" + xmlstr + "</config>"
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_TRUNK_PORT")
self.changed = True
def default_switchport(self, ifname):
"""Set interface default or unconfigured"""
change = False
if self.intf_info["linkType"] != "access":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port link-type access")
self.updates_cmd.append("port default vlan 1")
change = True
else:
if self.intf_info["pvid"] != "1":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port default vlan 1")
change = True
if not change:
return
conf_str = CE_NC_SET_DEFAULT_PORT % ifname
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "DEFAULT_INTF_VLAN")
self.changed = True
def vlan_series(self, vlanid_s):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
def vlan_list_to_bitmap(self, vlanlist):
""" convert vlan list to vlan bitmap """
vlan_bit = ['0'] * 1024
bit_int = [0] * 1024
vlan_list_len = len(vlanlist)
for num in range(vlan_list_len):
tagged_vlans = int(vlanlist[num])
if tagged_vlans <= 0 or tagged_vlans > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
            j = tagged_vlans // 4  # integer division: four vlan ids per hex nibble
bit_int[j] |= 0x8 >> (tagged_vlans % 4)
vlan_bit[j] = hex(bit_int[j])[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
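    # Worked example (editor's note): for vlanlist ['1', '2'] the first nibble
    # gets (0x8 >> 1) | (0x8 >> 2) = 0x6, so the returned 1024-character
    # bitmap is '6' followed by 1023 '0' characters.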
def vlan_bitmap_add(self, oldmap, newmap):
"""vlan add bitmap"""
vlan_bit = ['0'] * 1024
if len(newmap) != 1024:
self.module.fail_json(msg='Error: New vlan bitmap is invalid.')
if len(oldmap) != 1024 and len(oldmap) != 0:
self.module.fail_json(msg='Error: old vlan bitmap is invalid.')
if len(oldmap) == 0:
return newmap
for num in range(1024):
new_tmp = int(newmap[num], 16)
old_tmp = int(oldmap[num], 16)
add = (~(new_tmp & old_tmp)) & new_tmp
vlan_bit[num] = hex(add)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_del(self, oldmap, delmap):
"""vlan del bitmap"""
vlan_bit = ['0'] * 1024
if not oldmap or len(oldmap) == 0:
return ''.join(vlan_bit)
if len(oldmap) != 1024 or len(delmap) != 1024:
self.module.fail_json(msg='Error: vlan bitmap is invalid.')
for num in range(1024):
tmp = int(delmap[num], 16) & int(oldmap[num], 16)
vlan_bit[num] = hex(tmp)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def check_params(self):
"""Check all input params"""
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
                    msg='Error: Interface name %s is invalid.' % self.interface)
        if not self.intf_type or not is_portswitch_enabled(self.intf_type):
            self.module.fail_json(
                msg='Error: Interface %s is not a switch port.' % self.interface)
# check access_vlan
if self.access_vlan:
if not self.access_vlan.isdigit():
self.module.fail_json(msg='Error: Access vlan id is invalid.')
if int(self.access_vlan) <= 0 or int(self.access_vlan) > 4094:
self.module.fail_json(
msg='Error: Access vlan id is not in the range from 1 to 4094.')
# check native_vlan
if self.native_vlan:
if not self.native_vlan.isdigit():
self.module.fail_json(msg='Error: Native vlan id is invalid.')
if int(self.native_vlan) <= 0 or int(self.native_vlan) > 4094:
self.module.fail_json(
msg='Error: Native vlan id is not in the range from 1 to 4094.')
# get interface info
self.intf_info = self.get_interface_dict(self.interface)
if not self.intf_info:
self.module.fail_json(msg='Error: Interface does not exist.')
if not self.is_l2switchport():
self.module.fail_json(
                msg='Error: Interface is not a layer2 switch port.')
def get_proposed(self):
"""get proposed info"""
self.proposed['state'] = self.state
self.proposed['interface'] = self.interface
self.proposed['mode'] = self.mode
self.proposed['access_vlan'] = self.access_vlan
self.proposed['native_vlan'] = self.native_vlan
self.proposed['trunk_vlans'] = self.trunk_vlans
def get_existing(self):
"""get existing info"""
if self.intf_info:
self.existing["interface"] = self.intf_info["ifName"]
self.existing["mode"] = self.intf_info["linkType"]
self.existing["switchport"] = self.intf_info["l2Enable"]
self.existing['access_vlan'] = self.intf_info["pvid"]
self.existing['native_vlan'] = self.intf_info["pvid"]
self.existing['trunk_vlans'] = self.intf_info["trunkVlans"]
def get_end_state(self):
"""get end state info"""
if self.intf_info:
end_info = self.get_interface_dict(self.interface)
if end_info:
self.end_state["interface"] = end_info["ifName"]
self.end_state["mode"] = end_info["linkType"]
self.end_state["switchport"] = end_info["l2Enable"]
self.end_state['access_vlan'] = end_info["pvid"]
self.end_state['native_vlan'] = end_info["pvid"]
self.end_state['trunk_vlans'] = end_info["trunkVlans"]
def work(self):
"""worker"""
self.check_params()
if not self.intf_info:
self.module.fail_json(msg='Error: interface does not exist.')
self.get_existing()
self.get_proposed()
# present or absent
if self.state == "present" or self.state == "absent":
if self.mode == "access":
self.merge_access_vlan(self.interface, self.access_vlan)
elif self.mode == "trunk":
self.merge_trunk_vlan(
self.interface, self.native_vlan, self.trunk_vlans)
# unconfigured
else:
self.default_switchport(self.interface)
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
mode=dict(choices=['access', 'trunk'], required=False),
access_vlan=dict(type='str', required=False),
native_vlan=dict(type='str', required=False),
trunk_vlans=dict(type='str', required=False),
state=dict(choices=['absent', 'present', 'unconfigured'],
default='present')
)
argument_spec.update(ce_argument_spec)
switchport = SwitchPort(argument_spec)
switchport.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
bguillot/OpenUpgrade | addons/account/report/account_partner_ledger.py | 81 | 13063 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class third_party_ledger(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(third_party_ledger, self).__init__(cr, uid, name, context=context)
self.init_bal_sum = 0.0
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit_partner': self._sum_debit_partner,
'sum_credit_partner': self._sum_credit_partner,
'get_currency': self._get_currency,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_account': self._get_account,
'get_filter': self._get_filter,
'get_start_date': self._get_start_date,
'get_end_date': self._get_end_date,
'get_fiscalyear': self._get_fiscalyear,
'get_journal': self._get_journal,
'get_partners':self._get_partners,
'get_intial_balance':self._get_intial_balance,
'display_initial_balance':self._display_initial_balance,
'display_currency':self._display_currency,
'get_target_move': self._get_target_move,
})
def _get_filter(self, data):
if data['form']['filter'] == 'unreconciled':
return _('Unreconciled Entries')
return super(third_party_ledger, self)._get_filter(data)
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
obj_partner = self.pool.get('res.partner')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
ctx2 = data['form'].get('used_context',{}).copy()
self.initial_balance = data['form'].get('initial_balance', True)
if self.initial_balance:
ctx2.update({'initial_bal': True})
self.init_query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)
self.reconcil = True
if data['form']['filter'] == 'unreconciled':
self.reconcil = False
self.result_selection = data['form'].get('result_selection', 'customer')
self.amount_currency = data['form'].get('amount_currency', False)
self.target_move = data['form'].get('target_move', 'all')
PARTNER_REQUEST = ''
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.result_selection == 'supplier':
self.ACCOUNT_TYPE = ['payable']
elif self.result_selection == 'customer':
self.ACCOUNT_TYPE = ['receivable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
self.cr.execute(
"SELECT a.id " \
"FROM account_account a " \
"LEFT JOIN account_account_type t " \
"ON (a.type=t.code) " \
'WHERE a.type IN %s' \
"AND a.active", (tuple(self.ACCOUNT_TYPE), ))
self.account_ids = [a for (a,) in self.cr.fetchall()]
params = [tuple(move_state), tuple(self.account_ids)]
#if we print from the partners, add a clause on active_ids
if (data['model'] == 'res.partner') and ids:
PARTNER_REQUEST = "AND l.partner_id IN %s"
params += [tuple(ids)]
self.cr.execute(
"SELECT DISTINCT l.partner_id " \
"FROM account_move_line AS l, account_account AS account, " \
" account_move AS am " \
"WHERE l.partner_id IS NOT NULL " \
"AND l.account_id = account.id " \
"AND am.id = l.move_id " \
"AND am.state IN %s"
# "AND " + self.query +" " \
"AND l.account_id IN %s " \
" " + PARTNER_REQUEST + " " \
"AND account.active ", params)
self.partner_ids = [res['partner_id'] for res in self.cr.dictfetchall()]
objects = obj_partner.browse(self.cr, self.uid, self.partner_ids)
return super(third_party_ledger, self).set_context(objects, data, self.partner_ids, report_type)
def lines(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
full_account = []
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT l.id, l.date, j.code, acc.code as a_code, acc.name as a_name, l.ref, m.name as move_name, l.name, l.debit, l.credit, l.amount_currency,l.currency_id, c.symbol AS currency_code " \
"FROM account_move_line l " \
"LEFT JOIN account_journal j " \
"ON (l.journal_id = j.id) " \
"LEFT JOIN account_account acc " \
"ON (l.account_id = acc.id) " \
"LEFT JOIN res_currency c ON (l.currency_id=c.id)" \
"LEFT JOIN account_move m ON (m.id=l.move_id)" \
"WHERE l.partner_id = %s " \
"AND l.account_id IN %s AND " + self.query +" " \
"AND m.state IN %s " \
" " + RECONCILE_TAG + " "\
"ORDER BY l.date",
(partner.id, tuple(self.account_ids), tuple(move_state)))
res = self.cr.dictfetchall()
sum = 0.0
if self.initial_balance:
sum = self.init_bal_sum
for r in res:
sum += r['debit'] - r['credit']
r['progress'] = sum
full_account.append(r)
return full_account
def _get_intial_balance(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT COALESCE(SUM(l.debit),0.0), COALESCE(SUM(l.credit),0.0), COALESCE(sum(debit-credit), 0.0) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " "\
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
res = self.cr.fetchall()
self.init_bal_sum = res[0][2]
return res
def _sum_debit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
            if contemp is not None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
        if contemp is not None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _sum_credit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
            if contemp is not None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id=%s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
        if contemp is not None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _get_partners(self):
# TODO: deprecated, to remove in trunk
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
def _sum_currency_amount_account(self, account, form):
self._set_get_account_currency_code(account.id)
self.cr.execute("SELECT sum(aml.amount_currency) FROM account_move_line as aml,res_currency as rc WHERE aml.currency_id = rc.id AND aml.account_id= %s ", (account.id,))
total = self.cr.fetchone()
if self.account_currency:
return_field = str(total[0]) + self.account_currency
return return_field
else:
currency_total = self.tot_currency = 0.0
return currency_total
def _display_initial_balance(self, data):
if self.initial_balance:
return True
return False
def _display_currency(self, data):
if self.amount_currency:
return True
return False
class report_partnerledger(osv.AbstractModel):
_name = 'report.account.report_partnerledger'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledger'
_wrapped_report_class = third_party_ledger
class report_partnerledgerother(osv.AbstractModel):
_name = 'report.account.report_partnerledgerother'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledgerother'
_wrapped_report_class = third_party_ledger
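# Editor's note: the two AbstractModel wrappers above bind the QWeb templates
# 'account.report_partnerledger' and 'account.report_partnerledgerother' to
# this parser, so the report engine instantiates third_party_ledger whenever
# either template is rendered.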
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bradwoo8621/Swift-Study | Instagram/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/lib/TestCommon.py | 307 | 21397 | """
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python_executable
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import copy
import os
import os.path
import stat
import string
import sys
import types
import UserList
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
# Variables that describe the prefixes and suffixes on this system.
if sys.platform == 'win32':
exe_suffix = '.exe'
obj_suffix = '.obj'
shobj_suffix = '.obj'
shobj_prefix = ''
lib_prefix = ''
lib_suffix = '.lib'
dll_prefix = ''
dll_suffix = '.dll'
elif sys.platform == 'cygwin':
exe_suffix = '.exe'
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = ''
dll_suffix = '.dll'
elif string.find(sys.platform, 'irix') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
elif string.find(sys.platform, 'darwin') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.dylib'
elif string.find(sys.platform, 'sunos') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = 'so_'
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
    dll_suffix = '.so'
else:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
def is_writable(f):
mode = os.stat(f)[stat.ST_MODE]
return mode & stat.S_IWUSR
def separate_files(flist):
existing = []
missing = []
for f in flist:
if os.path.exists(f):
existing.append(f)
else:
missing.append(f)
return existing, missing
def _failed(self, status = 0):
if self.status is None or status is None:
return None
try:
return _status(self) not in status
except TypeError:
# status wasn't an iterable
return _status(self) != status
def _status(self):
return self.status
class TestCommon(TestCmd):
# Additional methods from the Perl Test::Cmd::Common module
# that we may wish to add in the future:
#
# $test->subdir('subdir', ...);
#
# $test->copy('src_file', 'dst_file');
def __init__(self, **kw):
"""Initialize a new TestCommon instance. This involves just
calling the base class initialization, and then changing directory
to the workdir.
"""
apply(TestCmd.__init__, [self], kw)
os.chdir(self.workdir)
def must_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
not writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
unwritable = filter(lambda x, iw=is_writable: not iw(x), existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if unwritable:
print "Unwritable files: `%s'" % string.join(unwritable, "', `")
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb'):
"""Ensures that the specified file contains the required text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, required) != -1)
if not contains:
print "File `%s' does not contain required string." % file
print self.banner('Required string ')
print required
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(not contains)
def must_contain_all_lines(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
missing = []
for line in lines:
if not find(output, line):
missing.append(line)
if missing:
if title is None:
title = 'output'
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains at least one of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
for line in lines:
if find(output, line):
return
if title is None:
title = 'output'
sys.stdout.write("Missing any expected line from %s:\n" % title)
for line in lines:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_lines(self, lines, output, title=None):
# Deprecated; retain for backwards compatibility.
return self.must_contain_all_lines(output, lines, title)
def must_exist(self, *files):
"""Ensures that the specified file(s) must exist. An individual
        file may be specified as a list of directory names, in which case the
pathname will be constructed by concatenating them. Exits FAILED
if any of the files does not exist.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
missing = filter(lambda x: not os.path.exists(x), files)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb'):
"""Matches the contents of the specified file (first argument)
against the expected contents (second argument). The expected
contents are a list of lines or a string which will be split
on newlines.
"""
file_contents = self.read(file, mode)
try:
self.fail_test(not self.match(file_contents, expect))
except KeyboardInterrupt:
raise
except:
print "Unexpected contents of `%s'" % file
self.diff(expect, file_contents, 'contents ')
raise
def must_not_contain(self, file, banned, mode = 'rb'):
"""Ensures that the specified file doesn't contain the banned text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, banned) != -1)
if contains:
print "File `%s' contains banned string." % file
print self.banner('Banned string ')
print banned
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
does not contain any of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(output, line)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
unexpected = []
for line in lines:
if find(output, line):
unexpected.append(line)
if unexpected:
if title is None:
title = 'output'
sys.stdout.write("Unexpected lines in %s:\n" % title)
for line in unexpected:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_not_contain_lines(self, lines, output, title=None):
return self.must_not_contain_any_line(output, lines, title)
def must_not_exist(self, *files):
"""Ensures that the specified file(s) must not exist.
An individual file may be specified as a list of directory names, in
which case the pathname will be constructed by concatenating them.
Exits FAILED if any of the files exists.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing = filter(os.path.exists, files)
if existing:
print "Unexpected files exist: `%s'" % string.join(existing, "', `")
self.fail_test(existing)
def must_not_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are not writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
writable = filter(is_writable, existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if writable:
print "Writable files: `%s'" % string.join(writable, "', `")
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
actual_stderr, expected_stderr, status, match):
"""
Post-processes running a subcommand, checking for failure
status and displaying output appropriately.
"""
if _failed(self, status):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
print "%s returned %s%s" % (self.program, str(_status(self)), expect)
print self.banner('STDOUT ')
print actual_stdout
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if expected_stdout is not None and not match(actual_stdout, expected_stdout):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if expected_stderr is not None and not match(actual_stderr, expected_stderr):
print self.banner('STDOUT ')
print actual_stdout
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
This handles the "options" keyword argument and exceptions.
"""
options = kw.pop('options', None)
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
try:
return apply(TestCmd.start,
(self, program, interpreter, arguments, universal_newlines),
kw)
except KeyboardInterrupt:
raise
except Exception, e:
print self.banner('STDOUT ')
try:
print self.stdout()
except IndexError:
pass
print self.banner('STDERR ')
try:
print self.stderr()
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
raise e
def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument. Additional arguments are similar
to those of the run() method:
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
"""
apply(TestCmd.finish, (self, popen,), kw)
match = kw.get('match', self.match)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def run(self, options = None, arguments = None,
stdout = None, stderr = '', status = 0, **kw):
"""Runs the program under test, checking that the test succeeded.
The arguments are the same as the base TestCmd.run() method,
with the addition of:
options Extra options that get appended to the beginning
of the arguments.
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
By default, this expects a successful exit (status = 0), does
not test standard output (stdout = None), and expects that error
output is empty (stderr = "").
"""
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
kw['arguments'] = arguments
match = kw.pop('match', self.match)
apply(TestCmd.run, [self], kw)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
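# A minimal usage sketch (hypothetical options and output, not part of
# this class): expect a clean exit, a specific stdout, and empty stderr.
#
#   test.run(options='-q', arguments='build', stdout='done\n')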
def skip_test(self, message="Skipping test.\n"):
"""Skips a test.
Proper test-skipping behavior is dependent on the external
TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
In either case, we print the specified message as an indication
that the substance of the test was skipped.
(This was originally added to support development under Aegis.
Technically, skipping a test is a NO RESULT, but Aegis would
treat that as a test failure and prevent the change from going to
the next step. Since we didn't want to force anyone using Aegis
to have to install absolutely every tool used by the tests, we
would actually report to Aegis that a skipped test has PASSED
so that the workflow isn't held up.)
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
if pass_skips in [None, 0, '0']:
# skip=1 means skip this function when showing where this
# result came from. They only care about the line where the
# script called test.skip_test(), not the line number where
# we call test.no_result().
self.no_result(skip=1)
else:
# We're under the development directory for this change,
# so this is an Aegis invocation; pass the test (exit 0).
self.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
ogenstad/ansible | lib/ansible/modules/cloud/amazon/s3_sync.py | 44 | 19602 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_sync
short_description: Efficiently upload multiple files to S3
description:
- The S3 module is great, but it is very slow for a large volume of files - even a dozen will be noticeable. In addition to speed, it handles globbing,
inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
version_added: "2.3"
options:
mode:
description:
- sync direction.
required: true
default: 'push'
choices: [ push ]
file_change_strategy:
description:
- Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched - they are fully skipped or fully uploaded.
- date_size will upload if file sizes don't match or if local file modified date is newer than s3's version
- checksum will compare etag values based on s3's implementation of chunked md5s.
- force will always upload all files.
required: false
default: 'date_size'
choices: [ force, checksum, date_size ]
bucket:
description:
- Bucket name.
required: true
key_prefix:
description:
- In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary.
required: false
file_root:
description:
- File/directory path for synchronization. This is a local path.
- This root path is scrubbed from the key name, so subdirectories will remain as keys.
required: true
permission:
description:
- Canned ACL to apply to synced files.
- Changing this ACL only changes newly synced files, it does not trigger a full reupload.
required: false
choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ]
mime_map:
description:
- >
Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
For example C({".txt": "application/text", ".yml": "application/text"})
required: false
include:
description:
- Shell pattern-style file matching.
- Used before exclude to determine eligible files (for instance, only "*.gif")
- For multiple patterns, comma-separate them.
required: false
default: "*"
exclude:
description:
- Shell pattern-style file matching.
- Used after include to remove files (for instance, skip "*.txt")
- For multiple patterns, comma-separate them.
required: false
default: ".*"
cache_control:
description:
- This is a string.
- Cache-Control header set on uploaded objects.
- Directives are separated by commas.
required: false
version_added: "2.4"
delete:
description:
- Remove remote files that exist in bucket but are not present in the file root.
required: false
default: no
version_added: "2.4"
requirements:
- boto3 >= 1.4.4
- botocore
- python-dateutil
author: tedder
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: basic upload
s3_sync:
bucket: tedder
file_root: roles/s3/files/
- name: all the options
s3_sync:
bucket: tedder
file_root: roles/s3/files
mime_map:
.yml: application/text
.json: application/text
key_prefix: config_files/web
file_change_strategy: force
permission: public-read
cache_control: "public, max-age=31536000"
include: "*"
exclude: "*.txt,.*"
'''
RETURN = '''
filelist_initial:
description: file listing (dicts) from initial globbing
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"modified_epoch": 1477416706
}]
filelist_local_etag:
description: file listing (dicts) including calculated local etag
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_s3:
description: file listing (dicts) including information about previously-uploaded versions
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_typed:
description: file listing (dicts) with calculated or overridden mime types
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706
}]
filelist_actionable:
description: file listing (dicts) of files that will be uploaded after the strategy decision
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477931256,
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931256 / 1477929260"
}]
uploaded:
description: file listing (dicts) of files that were actually uploaded
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931637 / 1477931489"
}]
'''
import datetime
import fnmatch
import hashlib
import mimetypes
import os
import stat as osstat # os.stat constants
import traceback
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn, get_aws_connection_info, HAS_BOTO3, boto_exception
from ansible.module_utils._text import to_text
try:
from dateutil import tz
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
try:
import botocore
except ImportError:
# Handled by imported HAS_BOTO3
pass
# the following function, calculate_multipart_etag, is from tlastowka
# on github and is used under its (compatible) GPL license. So this
# license applies to the following function.
# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
#
# calculate_multipart_etag Copyright (C) 2015
# Tony Lastowka <tlastowka at gmail dot com>
# https://github.com/tlastowka
#
#
# calculate_multipart_etag is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# calculate_multipart_etag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
"""
calculates a multipart upload etag for amazon s3
Arguments:
source_path -- The file to calculate the etag for
chunk_size -- The chunk size to calculate for.
"""
md5s = []
with open(source_path, 'rb') as fp:
while True:
data = fp.read(chunk_size)
if not data:
break
md5s.append(hashlib.md5(data))
if len(md5s) == 1:
new_etag = '"{0}"'.format(md5s[0].hexdigest())
else: # > 1
digests = b"".join(m.digest() for m in md5s)
new_md5 = hashlib.md5(digests)
new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s))
return new_etag
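# A minimal sketch (not part of the original module; the boto3 client,
# bucket and key are assumed to be supplied by the caller): decide whether
# a file needs re-uploading by comparing the locally computed multipart
# etag against the ETag reported by an S3 HEAD request.
def _upload_needed_sketch(s3, bucket, key, source_path):
    head = s3.head_object(Bucket=bucket, Key=key)
    # both sides carry surrounding double quotes, so compare directly
    return head['ETag'] != calculate_multipart_etag(source_path)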
def gather_files(fileroot, include=None, exclude=None):
ret = []
for (dirpath, dirnames, filenames) in os.walk(fileroot):
for fn in filenames:
fullpath = os.path.join(dirpath, fn)
# include/exclude
if include:
found = False
for x in include.split(','):
if fnmatch.fnmatch(fn, x):
found = True
if not found:
# not on the include list, so we don't want it.
continue
if exclude:
found = False
for x in exclude.split(','):
if fnmatch.fnmatch(fn, x):
found = True
if found:
# skip it, even if previously included.
continue
chopped_path = os.path.relpath(fullpath, start=fileroot)
fstat = os.stat(fullpath)
f_size = fstat[osstat.ST_SIZE]
f_modified_epoch = fstat[osstat.ST_MTIME]
ret.append({
'fullpath': fullpath,
'chopped_path': chopped_path,
'modified_epoch': f_modified_epoch,
'bytes': f_size,
})
# dirpath = path *to* the directory
# dirnames = subdirs *in* our directory
# filenames
return ret
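# A minimal usage sketch (hypothetical paths, not part of this module).
# Include patterns are applied first, then exclude, so the call below keeps
# only .gif files while still dropping anything named "thumb*":
#
#   for f in gather_files('/var/www/assets', include='*.gif', exclude='thumb*'):
#       print f['chopped_path'], f['bytes']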
def calculate_s3_path(filelist, key_prefix=''):
ret = []
for fileentry in filelist:
# don't modify the input dict
retentry = fileentry.copy()
retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path'])
ret.append(retentry)
return ret
def calculate_local_etag(filelist, key_prefix=''):
'''Really, "calculate md5", but since AWS uses their own format, we'll just call
it a "local etag". TODO optimization: only calculate if remote key exists.'''
ret = []
for fileentry in filelist:
# don't modify the input dict
retentry = fileentry.copy()
retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath'])
ret.append(retentry)
return ret
def determine_mimetypes(filelist, override_map):
ret = []
for fileentry in filelist:
retentry = fileentry.copy()
localfile = fileentry['fullpath']
# reminder: file extension is '.txt', not 'txt'.
_, file_extension = os.path.splitext(localfile)
if override_map and override_map.get(file_extension):
# override? use it.
retentry['mime_type'] = override_map[file_extension]
else:
# else sniff it
retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
# might be None or '' from one of the above. Not a great type but better than nothing.
if not retentry['mime_type']:
retentry['mime_type'] = 'application/octet-stream'
ret.append(retentry)
return ret
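# A minimal usage sketch (hypothetical extensions, not part of this module).
# Override-map keys include the leading dot; anything not overridden is
# sniffed via mimetypes and finally falls back to application/octet-stream:
#
#   typed = determine_mimetypes(filelist, {'.yml': 'application/text'})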
def head_s3(s3, bucket, s3keys):
retkeys = []
for entry in s3keys:
retentry = entry.copy()
# don't modify the input dict
try:
retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
except botocore.exceptions.ClientError as err:
if (hasattr(err, 'response') and
'ResponseMetadata' in err.response and
'HTTPStatusCode' in err.response['ResponseMetadata'] and
str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
pass
else:
raise Exception(err)
# error_msg = boto_exception(err)
# return {'error': error_msg}
retkeys.append(retentry)
return retkeys
def filter_list(s3, bucket, s3filelist, strategy):
keeplist = list(s3filelist)
for e in keeplist:
e['_strategy'] = strategy
# init/fetch info from S3 if we're going to use it for comparisons
if strategy != 'force':
keeplist = head_s3(s3, bucket, s3filelist)
# now actually run the strategies
if strategy == 'checksum':
for entry in keeplist:
if entry.get('s3_head'):
# since we have a remote s3 object, compare the values.
if entry['s3_head']['ETag'] == entry['local_etag']:
# files match, so remove the entry
entry['skip_flag'] = True
else:
# file etags don't match, keep the entry.
pass
else: # we don't have an etag, so we'll keep it.
pass
elif strategy == 'date_size':
for entry in keeplist:
if entry.get('s3_head'):
# fstat = entry['stat']
local_modified_epoch = entry['modified_epoch']
local_size = entry['bytes']
# py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
# remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
remote_modified_datetime = entry['s3_head']['LastModified']
delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
remote_modified_epoch = delta.seconds + (delta.days * 86400)
remote_size = entry['s3_head']['ContentLength']
entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch)
entry['whysize'] = '{0} / {1}'.format(local_size, remote_size)
# Skip only when the local file is both not newer and the same size,
# matching the documented date_size semantics (upload if sizes differ
# or if the local file is newer).
if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
    entry['skip_flag'] = True
else:
entry['why'] = "no s3_head"
# else: probably 'force'. Basically we don't skip with any other strategies.
else:
pass
# prune 'please skip' entries, if any.
return [x for x in keeplist if not x.get('skip_flag')]
def upload_files(s3, bucket, filelist, params):
ret = []
for entry in filelist:
args = {
'ContentType': entry['mime_type']
}
if params.get('permission'):
args['ACL'] = params['permission']
if params.get('cache_control'):
args['CacheControl'] = params['cache_control']
# if this fails exception is caught in main()
s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
ret.append(entry)
return ret
def remove_files(s3, sourcelist, params):
bucket = params.get('bucket')
key_prefix = params.get('key_prefix')
paginator = s3.get_paginator('list_objects_v2')
current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', []))
keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist)
delete_keys = list(current_keys - keep_keys)
# can delete 1000 objects at a time
groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
for keys in groups_of_keys:
s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]})
return delete_keys
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
mode=dict(choices=['push'], default='push'),
file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
bucket=dict(required=True),
key_prefix=dict(required=False, default=''),
file_root=dict(required=True, type='path'),
permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
retries=dict(required=False),
mime_map=dict(required=False, type='dict'),
exclude=dict(required=False, default=".*"),
include=dict(required=False, default="*"),
cache_control=dict(required=False, default=''),
delete=dict(required=False, type='bool', default=False),
# future options: encoding, metadata, storage_class, retries
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_DATEUTIL:
module.fail_json(msg='dateutil required for this module')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
result = {}
mode = module.params['mode']
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified")
s3 = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
if mode == 'push':
try:
result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
if module.params['delete']:
result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params)
# mark changed if we actually upload something.
if result.get('uploads') or result.get('removed'):
result['changed'] = True
# result.update(filelist=actionable_filelist)
except botocore.exceptions.ClientError as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
yangleo/cloud-github | openstack_dashboard/dashboards/admin/hypervisors/compute/tabs.py | 57 | 1309 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.hypervisors.compute import tables
class ComputeHostTab(tabs.TableTab):
table_classes = (tables.ComputeHostTable,)
name = _("Compute Host")
slug = "compute_host"
template_name = "horizon/common/_detail_table.html"
def get_compute_host_data(self):
try:
return nova.service_list(self.tab_group.request,
binary='nova-compute')
except Exception:
msg = _('Unable to get nova services list.')
exceptions.handle(self.tab_group.request, msg)
return []
| apache-2.0 |
dashpay/electrum-dash | plugins/greenaddress_instant/qt.py | 12 | 3985 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import urllib
import sys
import requests
from PyQt4.QtGui import QApplication, QPushButton
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
class Plugin(BasePlugin):
button_label = _("Verify GA instant")
@hook
def transaction_dialog(self, d):
d.verify_button = QPushButton(self.button_label)
d.verify_button.clicked.connect(lambda: self.do_verify(d))
d.buttons.insert(0, d.verify_button)
self.transaction_dialog_update(d)
def get_my_addr(self, d):
"""Returns the address for given tx which can be used to request
instant confirmation verification from GreenAddress"""
for addr, _ in d.tx.get_outputs():
if d.wallet.is_mine(addr):
return addr
return None
@hook
def transaction_dialog_update(self, d):
if d.tx.is_complete() and self.get_my_addr(d):
d.verify_button.show()
else:
d.verify_button.hide()
def do_verify(self, d):
tx = d.tx
wallet = d.wallet
window = d.parent
# 1. get the password and sign the verification request
password = None
if wallet.use_encryption:
msg = _('GreenAddress requires your signature \n'
'to verify that the transaction is instant.\n'
'Please enter your password to sign a\n'
'verification request.')
password = window.password_dialog(msg, parent=d)
if not password:
return
try:
d.verify_button.setText(_('Verifying...'))
QApplication.processEvents() # update the button label
addr = self.get_my_addr(d)
message = "Please verify if %s is GreenAddress instant confirmed" % tx.hash()
sig = wallet.sign_message(addr, message, password)
sig = base64.b64encode(sig)
# 2. send the request
response = requests.request("GET", ("https://greenaddress.it/verify/?signature=%s&txhash=%s" % (urllib.quote(sig), tx.hash())),
headers = {'User-Agent': 'Electrum'})
response = response.json()
# 3. display the result
if response.get('verified'):
d.show_message(_('%s is covered by GreenAddress instant confirmation') % (tx.hash()), title=_('Verification successful!'))
else:
d.show_critical(_('%s is not covered by GreenAddress instant confirmation') % (tx.hash()), title=_('Verification failed!'))
except BaseException as e:
import traceback
traceback.print_exc(file=sys.stdout)
d.show_error(str(e))
finally:
d.verify_button.setText(self.button_label)
| mit |
endorphinl/horizon-fork | openstack_dashboard/enabled/_1000_project.py | 21 | 1201 | # Copyright 2015, Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The slug of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'project'
# If set to True, this dashboard will be set as the default dashboard.
DEFAULT = True
# A dictionary of exception classes to be added to HORIZON['exceptions'].
ADD_EXCEPTIONS = {}
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = ['openstack_dashboard.dashboards.project']
ADD_ANGULAR_MODULES = [
'horizon.dashboard.project',
]
AUTO_DISCOVER_STATIC_FILES = True
ADD_JS_FILES = []
ADD_JS_SPEC_FILES = []
ADD_SCSS_FILES = [
'dashboard/project/project.scss'
]
| apache-2.0 |
mattesno1/CouchPotatoServer | libs/rtorrent/common.py | 88 | 4050 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urlparse
import os
from rtorrent.compat import is_py3
def bool_to_int(value):
"""Translates python booleans to RPC-safe integers"""
if value is True:
return("1")
elif value is False:
return("0")
else:
return(value)
def cmd_exists(cmds_list, cmd):
"""Check if given command is in list of available commands
@param cmds_list: see L{RTorrent._rpc_methods}
@type cmds_list: list
@param cmd: name of command to be checked
@type cmd: str
@return: bool
"""
return(cmd in cmds_list)
def find_torrent(info_hash, torrent_list):
"""Find torrent file in given list of Torrent classes
@param info_hash: info hash of torrent
@type info_hash: str
@param torrent_list: list of L{Torrent} instances (see L{RTorrent.get_torrents})
@type torrent_list: list
@return: L{Torrent} instance, or None if not found
"""
for t in torrent_list:
if t.info_hash == info_hash:
return t
return None
def is_valid_port(port):
"""Check if given port is valid"""
return(0 <= int(port) <= 65535)
def convert_version_tuple_to_str(t):
return(".".join([str(n) for n in t]))
def safe_repr(fmt, *args, **kwargs):
""" Formatter that handles unicode arguments """
if not is_py3():
# unicode fmt can take str args, str fmt cannot take unicode args
fmt = fmt.decode("utf-8")
out = fmt.format(*args, **kwargs)
return out.encode("utf-8")
else:
return fmt.format(*args, **kwargs)
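# A couple of illustrative calls (a sketch, not part of the module's API):
#
#   safe_repr("Torrent(info_hash={0})", u"caf\xe9")  # unicode-safe on py2
#   safe_repr("{0}/{1}", "a", "b")                   # -> "a/b"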
def split_path(path):
fragments = path.split('/')
if len(fragments) == 1:
return fragments
if not fragments[-1]:
return fragments[:-1]
return fragments
def join_path(base, path):
# Return if we have a new absolute path
if os.path.isabs(path):
return path
# non-absolute base encountered
if base and not os.path.isabs(base):
raise NotImplementedError()
return '/'.join(split_path(base) + split_path(path))
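# A few illustrative calls (a sketch, not part of the module's API):
#
#   split_path('a/b/c')                  # -> ['a', 'b', 'c']
#   join_path('/downloads', 'incoming')  # -> '/downloads/incoming'
#   join_path('/downloads', '/abs/path') # -> '/abs/path' (absolute wins)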
def join_uri(base, uri, construct=True):
p_uri = urlparse.urlparse(uri)
# Return if there is nothing to join
if not p_uri.path:
return base
scheme, netloc, path, params, query, fragment = urlparse.urlparse(base)
# Switch to 'uri' parts
_, _, _, params, query, fragment = p_uri
path = join_path(path, p_uri.path)
result = urlparse.ParseResult(scheme, netloc, path, params, query, fragment)
if not construct:
return result
# Construct from parts
return urlparse.urlunparse(result)
def update_uri(uri, construct=True, **kwargs):
if isinstance(uri, urlparse.ParseResult):
uri = dict(uri._asdict())
if type(uri) is not dict:
raise ValueError("Unknown URI type")
uri.update(kwargs)
result = urlparse.ParseResult(**uri)
if not construct:
return result
return urlparse.urlunparse(result)
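# A minimal usage sketch (hypothetical URI, not part of the module's API):
# update_uri() expects a urlparse.ParseResult (or its dict form), so parse
# the string first.
#
#   parts = urlparse.urlparse('https://localhost:5000/RPC2')
#   update_uri(parts, scheme='http')  # -> 'http://localhost:5000/RPC2'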
| gpl-3.0 |
zuck/scribee | scribee.py | 1 | 2854 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file is part of the Scribee project.
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.1'
import sys
import os
import fileinput
import settings
class Scribox(object):
def __init__(self):
self._inspectors = getattr(settings, "INSPECTORS", {})
self._output_dir = getattr(settings, "OUTPUT_DIR", 'output')
self._verbose = getattr(settings, "VERBOSE", False)
self._cache_sources = []
def generate(self, sources=[], renderers=getattr(settings, "RENDERERS", {})):
from entity import Entity, DocBlock
# Clear buffers.
Entity.entities = []
DocBlock.blocks = []
self._cache_sources = []
# Start a new generation.
print 'SCRIBOX ----- ver %s' % __version__
print '======================='
print 'Searching for entities...'
for source in sources:
self.parse_file(source)
sys.stdout.flush()
print 'Found a total of %d entity/ies.' % len(Entity.entities)
for format, renderer in renderers.items():
print 'Generating contents in "%s" format...' % format,
renderer.render(Entity.entities, self._output_dir)
print "Done."
print 'Generated %d format/s.' % len(renderers)
def parse_file(self, filename=''):
filename = filename.replace('\\', '/').replace('//', '/')
if filename not in self._cache_sources:
self._cache_sources.append(filename)
# File not found.
if not os.path.exists(filename):
return
# File.
elif os.path.isfile(filename):
root, ext = os.path.splitext(filename)
# Inspector not found for this extension.
if not self._inspectors.has_key(ext):
if self._verbose:
print "Skipped %s." % filename
return
inspector = self._inspectors[ext]
f = fileinput.input(filename)
print "Inspecting %s..." % filename,
sys.stdout.flush()
new_entities_count = inspector.parse(f)
print "Found %d entity/ies." % new_entities_count
# Directory.
elif os.path.isdir(filename):
os.path.walk(filename, self.parse_dir, [])
def parse_dir(self, arg, dirname, fnames):
for filename in fnames:
pathname = '/'.join([dirname, filename])
self.parse_file(pathname)
if __name__ == "__main__":
s = Scribox()
s.generate(sys.argv[1:])
| mit |
danieldresser/cortex | python/IECoreHoudini/FnParameterisedHolder.py | 12 | 8982 | ##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import warnings
import hou
import toolutils
import IECore
import IECoreHoudini
class FnParameterisedHolder():
_nodeType = None
# create our function set and stash which node we're looking at
def __init__(self, node=None):
self.__node = node
# check this node is still valid
def nodeValid(self):
if not self.__node:
raise "FnParameterisedHolder does not have a node to operate on."
try:
p = self.__node.path()
return True
except hou.ObjectWasDeleted:
return False
# return the node we're currently wrapping
def node(self):
return self.__node if self.nodeValid() else None
@staticmethod
# nodeType: type of node to create (str)
# name: desired node name (str)
# className: class path to op stub (str)
# version: op version, or None for latest (int)
# envVarName: environment variable to use as a search path for ops (str)
# parent: parent node, or None to create a new /obj geo. Ignored if contextArgs is used in UI mode (hou.Node)
# contextArgs: args related to the creation context, as would come from UI menu interactions (dict)
# If empty or not in UI mode, will create a top level OBJ to house the new holder
def _doCreate( nodeType, name, className, version=None, envVarName=None, parent=None, contextArgs={} ) :
if hou.isUIAvailable() and contextArgs.get( "toolname", "" ) :
holder = toolutils.genericTool( contextArgs, nodeType, nodename = name )
else :
parent = parent if parent else hou.node( "/obj" ).createNode( "geo", node_name=name, run_init_scripts=False )
holder = parent.createNode( nodeType, node_name=name )
IECoreHoudini.FnParameterisedHolder( holder ).setParameterised( className, version, envVarName )
if contextArgs.get( "shiftclick", False ) :
converter = holder.parent().createNode( "ieCortexConverter", node_name = holder.name()+"Converter" )
outputNode = hou.node( contextArgs.get( "outputnodename", "" ) )
toolutils.connectInputsAndOutputs( converter, False, holder, outputNode, 0, 0 )
x, y = holder.position()
converter.setPosition( [x,y-1] )
return holder
# do we have a valid parameterised instance?
def hasParameterised( self ) :
return IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).hasParameterised() if self.nodeValid() else False
# this sets a parameterised object on our node and then updates the parameters
def setParameterised( self, classNameOrParameterised, classVersion=None, envVarName=None, updateGui=True ) :
if not self.nodeValid() :
return
if isinstance( classNameOrParameterised, str ) :
if classVersion is None or classVersion < 0 :
classVersions = IECore.ClassLoader.defaultLoader( envVarName ).versions( classNameOrParameterised )
classVersion = classVersions[-1] if classVersions else 0
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterised( classNameOrParameterised, classVersion, envVarName )
else :
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterised( classNameOrParameterised )
parameterised = self.getParameterised()
if updateGui and parameterised :
self.updateParameters( parameterised )
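# A minimal usage sketch (hypothetical node path, op name, version and
# search-path variable; none are taken from this file):
#
#   fn = IECoreHoudini.FnParameterisedHolder( hou.node( "/obj/geo1/holder" ) )
#   fn.setParameterised( "myOp", 1, "MY_OP_PATHS" )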
# this returns the parameterised object our node is working with
def getParameterised( self ) :
return IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).getParameterised() if self.hasParameterised() else None
def setParameterisedValues( self, time = None ) :
time = hou.time() if time is None else time
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterisedValues( time )
# get our list of class names based on matchString
def classNames( self ) :
if not self.nodeValid() :
return []
matchString = self.__node.parm( "__classMatchString" ).eval()
searchPathEnvVar = self.__node.parm( "__classSearchPathEnvVar" ).eval()
return IECore.ClassLoader.defaultLoader( searchPathEnvVar ).classNames( matchString )
# takes a snapshot of the parameter values & expressions on our node so
# that if we change the procedural/op we can restore the parameters afterwards.
def cacheParameters(self):
cached_parameters = {}
for p in self.__node.parmTuplesInFolder(['Parameters']):
if p.isSpare():
data = {}
data['value'] = p.eval()
expressions = []
for i in range(len(p)):
try:
expr = p[i].expression()
lang = p[i].expressionLanguage()
expressions.append( ( expr, lang ) )
except:
expressions.append( ( None, None ) )
data['expressions'] = expressions
cached_parameters[p.name()] = data
return cached_parameters
# resores parameter values/expressions from those cached by cacheParameters
def restoreCachedParameters(self, cached):
for p in self.__node.parmTuplesInFolder(['Parameters']):
if p.name() in cached:
cached_data = cached[p.name()]
p.set( cached_data['value'] )
for i in range(len(p)):
if cached_data['expressions'][i][0]:
expr = cached_data['expressions'][i][0]
lang = cached_data['expressions'][i][1]
p[i].setExpression( expr, lang )
# return the spare parameters under the "Parameters" tab
def spareParameters( self, tuples=True ) :
result = []
for p in self.__node.spareParms() :
if "Parameters" in p.containingFolders() :
result.append( p.tuple() if tuples else p )
return result
# this method removes all spare parameters from the "Parameters" folder
def removeParameters( self ) :
if not self.nodeValid() :
return
spareParms = self.spareParameters()
while spareParms :
self.__node.removeSpareParmTuple( spareParms[0] )
# this is needed to account for parms removed by a containing folder
spareParms = self.spareParameters()
# add/remove parameters on our node so we correctly reflect our Procedural
def updateParameters( self, parameterised ) :
if not self.nodeValid():
return
# cache parameters & then remove them
cached_parameters = self.cacheParameters()
self.removeParameters()
if not parameterised:
return
# get a list of our parm templates by calling createParm on our top-level CompoundParameter
# and add them as spare parameter
parms = IECoreHoudini.ParmTemplates.createParm( parameterised.parameters(), top_level=True )
parm_names = []
for p in parms:
parm_names.append( p['name'] )
parm = self.__node.addSpareParmTuple( p['tuple'], in_folder=p['folder'], create_missing_folders=True )
parm.set( p['initialValue'] )
# restore our cached parameters
self.restoreCachedParameters( cached_parameters )
# update the nodes parameter evaluation expression
# this creates cook dependencies on the parameters
expr = ""
for p in parm_names:
expr += "if parmTuple('%s'):\n\t%s = evalParmTuple('%s')\n" % ( p, p, p )
expr += "return 1"
if len(parm_names)==0:
expr = "1"
eval_parm = self.__node.parm( "__evaluateParameters" )
eval_parm.lock(False)
eval_parm.setExpression( expr, language=hou.exprLanguage.Python, replace_expression=True )
eval_parm.lock(True)
| bsd-3-clause |
mateor/pants | tests/python/pants_test/backend/jvm/tasks/test_check_published_deps.py | 10 | 3985 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.tasks.check_published_deps import CheckPublishedDeps
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class CheckPublishedDepsTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Target,
'jar_library': JarLibrary,
'java_library': JavaLibrary,
},
objects={
'artifact': Artifact,
'jar': JarDependency,
'scala_artifact': ScalaArtifact,
'scala_jar': ScalaJarDependency,
'repo': Repository(name='repo',
url='http://www.www.com',
push_db_basedir=os.path.join(self.build_root, 'repo')),
}
)
@classmethod
def task_type(cls):
return CheckPublishedDeps
def assert_console_output(self, *args, **kwargs):
# Ensure that JarPublish's repos option is set, as CheckPublishedDeps consults it.
self.set_options_for_scope('publish.jar', repos={})
return super(CheckPublishedDepsTest, self).assert_console_output(*args, **kwargs)
def setUp(self):
super(CheckPublishedDepsTest, self).setUp()
self.create_file('repo/org.name/lib1/publish.properties', dedent("""
revision.major.org.name%lib1=2
revision.minor.org.name%lib1=0
revision.patch.org.name%lib1=0
revision.sha.org.name%lib1=12345
"""))
self.create_file('repo/org.name/lib2/publish.properties', dedent("""
revision.major.org.name%lib2=2
revision.minor.org.name%lib2=0
revision.patch.org.name%lib2=0
revision.sha.org.name%lib2=12345
"""))
self.add_to_build_file('provider/BUILD', dedent("""
java_library(name='lib1',
provides=artifact(
org='org.name',
name='lib1',
repo=repo),
sources=[])
java_library(name='lib2',
provides=artifact(
org='org.name',
name='lib2',
repo=repo),
sources=[])
"""))
self.add_to_build_file('outdated/BUILD', dedent("""
jar_library(name='outdated',
jars=[jar(org='org.name', name='lib1', rev='1.0.0')]
)
"""))
self.add_to_build_file('uptodate/BUILD', dedent("""
jar_library(name='uptodate',
jars=[jar(org='org.name', name='lib2', rev='2.0.0')]
)
"""))
self.add_to_build_file('both/BUILD', dedent("""
target(name='both',
dependencies=[
'outdated',
'uptodate',
]
)
"""))
def test_all_up_to_date(self):
self.assert_console_output(
targets=[self.target('uptodate')]
)
def test_print_up_to_date_and_outdated(self):
self.assert_console_output(
'outdated org.name#lib1 1.0.0 latest 2.0.0',
'up-to-date org.name#lib2 2.0.0',
targets=[self.target('both')],
options={'print_uptodate': True}
)
def test_outdated(self):
self.assert_console_output(
'outdated org.name#lib1 1.0.0 latest 2.0.0',
targets=[self.target('outdated')]
)
| apache-2.0 |
reachedu14/traininginstitute | coursebuilder/tests/suite.py | 4 | 10911 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder test suite.
This script runs all functional and units test in the Course Builder project.
Here is how to use the script:
- download WebTest Python package from a URL below and put
the files in a folder of your choice, for example: tmp/webtest:
http://pypi.python.org/packages/source/W/WebTest/WebTest-1.4.2.zip
- update your Python path:
PYTHONPATH=$PYTHONPATH:/tmp/webtest
- invoke this test suite from the command line:
# Automatically find and run all Python tests in tests/*.
python tests/suite.py
# Run only tests matching shell glob *_functional_test.py in tests/*.
python tests/suite.py --pattern *_functional_test.py
# Run test method baz in unittest.TestCase Bar found in tests/foo.py.
python tests/suite.py --test_class_name tests.foo.Bar.baz
- review the output to make sure there are no errors or warnings
Good luck!
"""
__author__ = 'Sean Lip'
import argparse
import os
import shutil
import signal
import subprocess
import sys
import time
import unittest
import task_queue
import webtest
import appengine_config
from google.appengine.api.search import simple_search_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--pattern', default='*.py',
help='shell pattern for discovering files containing tests', type=str)
_PARSER.add_argument(
'--test_class_name',
help='optional dotted module name of the test(s) to run', type=str)
_PARSER.add_argument(
'--integration_server_start_cmd',
help='script to start an external CB server', type=str)
# Base filesystem location for test data.
TEST_DATA_BASE = '/tmp/experimental/coursebuilder/test-data/'
def empty_environ():
os.environ['AUTH_DOMAIN'] = 'example.com'
os.environ['SERVER_NAME'] = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['DEFAULT_VERSION_HOSTNAME'] = (
os.environ['HTTP_HOST'] + ':' + os.environ['SERVER_PORT'])
def iterate_tests(test_suite_or_case):
"""Iterate through all of the test cases in 'test_suite_or_case'."""
try:
suite = iter(test_suite_or_case)
except TypeError:
yield test_suite_or_case
else:
for test in suite:
for subtest in iterate_tests(test):
yield subtest
class TestBase(unittest.TestCase):
"""Base class for all Course Builder tests."""
REQUIRES_INTEGRATION_SERVER = 1
INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'
def setUp(self):
super(TestBase, self).setUp()
# Map of object -> {symbol_string: original_value}
self._originals = {}
def tearDown(self):
self._unswap_all()
super(TestBase, self).tearDown()
def swap(self, source, symbol, new): # pylint: disable=invalid-name
"""Swaps out source.symbol for a new value.
Allows swapping of members and methods:
myobject.foo = 'original_foo'
self.swap(myobject, 'foo', 'bar')
self.assertEqual('bar', myobject.foo)
myobject.baz() # -> 'original_baz'
self.swap(myobject, 'baz', lambda: 'quux')
self.assertEqual('quux', myobject.bar())
Swaps are automatically undone in tearDown().
Args:
source: object. The source object to swap from.
symbol: string. The name of the symbol to swap.
new: object. The new value to swap in.
"""
if source not in self._originals:
self._originals[source] = {}
# use a membership test so that falsy originals (0, '', None) are
# recorded on the first swap and restored correctly
if symbol not in self._originals[source]:
self._originals[source][symbol] = getattr(source, symbol)
setattr(source, symbol, new)
# Allow protected method names. pylint: disable=g-bad-name
def _unswap_all(self):
for source, symbol_to_value in self._originals.iteritems():
for symbol, value in symbol_to_value.iteritems():
setattr(source, symbol, value)
def shortDescription(self):
"""Additional information logged during unittest invocation."""
# Suppress default logging of docstrings. Instead log name/status only.
return None
class FunctionalTestBase(TestBase):
"""Base class for functional tests."""
def setUp(self):
super(FunctionalTestBase, self).setUp()
# e.g. TEST_DATA_BASE/tests/functional/tests/MyTestCase.
self.test_tempdir = os.path.join(
TEST_DATA_BASE, self.__class__.__module__.replace('.', os.sep),
self.__class__.__name__)
self.reset_filesystem()
def tearDown(self):
self.reset_filesystem(remove_only=True)
super(FunctionalTestBase, self).tearDown()
def reset_filesystem(self, remove_only=False):
if os.path.exists(self.test_tempdir):
shutil.rmtree(self.test_tempdir)
if not remove_only:
os.makedirs(self.test_tempdir)
class AppEngineTestBase(FunctionalTestBase):
"""Base class for tests that require App Engine services."""
def getApp(self): # pylint: disable=g-bad-name
"""Returns the main application to be tested."""
raise Exception('Not implemented.')
def setUp(self): # pylint: disable=g-bad-name
super(AppEngineTestBase, self).setUp()
empty_environ()
# setup an app to be tested
self.testapp = webtest.TestApp(self.getApp())
self.testbed = testbed.Testbed()
self.testbed.activate()
# configure datastore policy to emulate instantaneously and globally
# consistent HRD; we also patch dev_appserver in main.py to run under
# the same policy
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# declare any relevant App Engine service stubs here
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# TODO(emichael): Fix this when an official stub is created
self.testbed._register_stub(
'search', simple_search_stub.SearchServiceStub())
self.task_dispatcher = task_queue.TaskQueueHandlerDispatcher(
self.testapp, self.taskq)
def tearDown(self): # pylint: disable=g-bad-name
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def execute_all_deferred_tasks(self, queue_name='default'):
"""Executes all pending deferred tasks."""
# Outer loop here because some tasks (esp. map/reduce) will enqueue
# more tasks as part of their operation.
while True:
tasks = self.taskq.GetTasks(queue_name)
if not tasks:
break
for task in tasks:
self.task_dispatcher.dispatch_task(task)
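# A minimal usage sketch from a subclass's test method (hypothetical
# handler URL and assertions, not part of this file): enqueue work via a
# request, then drain the queue synchronously before checking side effects.
#
#   def test_deferred_job(self):
#       self.testapp.get('/start_job')
#       self.execute_all_deferred_tasks()
#       # assert against datastore/memcache state here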
def create_test_suite(parsed_args):
"""Loads all requested test suites.
By default, loads all unittest.TestCases found under the project root's
tests/ directory.
Args:
parsed_args: argparse.Namespace. Processed command-line arguments.
Returns:
unittest.TestSuite. The test suite populated with all tests to run.
"""
loader = unittest.TestLoader()
if parsed_args.test_class_name:
return loader.loadTestsFromName(parsed_args.test_class_name)
else:
return loader.discover(
os.path.dirname(__file__), pattern=parsed_args.pattern)
def start_integration_server(integration_server_start_cmd):
print 'Starting external server: %s' % integration_server_start_cmd
server = subprocess.Popen(integration_server_start_cmd)
time.sleep(3) # Wait for server to start up
return server
def stop_integration_server(server):
server.kill() # dev_appserver.py itself.
# The new dev appserver starts a _python_runtime.py process that isn't
# captured by start_integration_server and so doesn't get killed. Until it's
# done, our tests will never complete so we kill it manually.
pid = int(subprocess.Popen(
['pgrep', '-f', '_python_runtime.py'], stdout=subprocess.PIPE
).communicate()[0][:-1])
os.kill(pid, signal.SIGKILL)
def fix_sys_path():
"""Fix the sys.path to include GAE extra paths."""
import dev_appserver # pylint: disable=C6204
# dev_appserver.fix_sys_path() prepends GAE paths to sys.path and hides
# our classes like 'tests' behind other modules that have 'tests'.
# Here, unlike dev_appserver, we append the path instead of prepending it,
# so that our classes come first.
sys.path += dev_appserver.EXTRA_PATHS[:]
def main():
"""Starts in-process server and runs all test cases in this module."""
fix_sys_path()
parsed_args = _PARSER.parse_args()
test_suite = create_test_suite(parsed_args)
all_tags = set()
for test in iterate_tests(test_suite):
if hasattr(test, 'TAGS'):
all_tags.update(test.TAGS)
server = None
if TestBase.REQUIRES_INTEGRATION_SERVER in all_tags:
server = start_integration_server(
parsed_args.integration_server_start_cmd)
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
if server:
stop_integration_server(server)
if result.errors or result.failures:
raise Exception(
'Test suite failed: %s errors, %s failures of '
' %s tests run.' % (
len(result.errors), len(result.failures), result.testsRun))
import tests.functional.actions as actions # pylint: disable=g-import-not-at-top
count = len(actions.UNIQUE_URLS_FOUND.keys())
result.stream.writeln('INFO: Unique URLs found: %s' % count)
result.stream.writeln('INFO: All %s tests PASSED!' % result.testsRun)
if __name__ == '__main__':
appengine_config.gcb_force_default_encoding('ascii')
main()
| apache-2.0 |
HiSPARC/station-software | user/python/Lib/site-packages/win32/Demos/service/serviceEvents.py | 40 | 4127 | # A Demo of a service that takes advantage of the additional notifications
# available in later Windows versions.
# Note that all output is written as event log entries - so you must install
# and start the service, then look at the event log for messages as events
# are generated.
# Events are generated for USB device insertion and removal, power state
# changes and hardware profile events - so try putting your computer to
# sleep and waking it, inserting a memory stick, etc then check the event log
import win32serviceutil, win32service
import win32event
import servicemanager
# Most event notification support lives around win32gui
import win32gui, win32gui_struct, win32con
GUID_DEVINTERFACE_USB_DEVICE = "{A5DCBF10-6530-11D2-901F-00C04FB951ED}"
class EventDemoService(win32serviceutil.ServiceFramework):
_svc_name_ = "PyServiceEventDemo"
_svc_display_name_ = "Python Service Event Demo"
_svc_description_ = "Demonstrates a Python service which takes advantage of the extra notifications"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
# register for a device notification - we pass our service handle
# instead of a window handle.
filter = win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(
GUID_DEVINTERFACE_USB_DEVICE)
self.hdn = win32gui.RegisterDeviceNotification(self.ssh, filter,
win32con.DEVICE_NOTIFY_SERVICE_HANDLE)
# Override the base class so we can accept additional events.
def GetAcceptedControls(self):
# say we accept them all.
rc = win32serviceutil.ServiceFramework.GetAcceptedControls(self)
        # Note: device events have no SERVICE_ACCEPT_* flag - they are
        # enabled via RegisterDeviceNotification in __init__, so the control
        # code SERVICE_CONTROL_DEVICEEVENT must not be OR'd into this mask.
        rc |= win32service.SERVICE_ACCEPT_PARAMCHANGE \
              | win32service.SERVICE_ACCEPT_NETBINDCHANGE \
              | win32service.SERVICE_ACCEPT_HARDWAREPROFILECHANGE \
              | win32service.SERVICE_ACCEPT_POWEREVENT \
              | win32service.SERVICE_ACCEPT_SESSIONCHANGE
return rc
# All extra events are sent via SvcOtherEx (SvcOther remains as a
# function taking only the first args for backwards compat)
def SvcOtherEx(self, control, event_type, data):
# This is only showing a few of the extra events - see the MSDN
# docs for "HandlerEx callback" for more info.
if control == win32service.SERVICE_CONTROL_DEVICEEVENT:
info = win32gui_struct.UnpackDEV_BROADCAST(data)
msg = "A device event occurred: %x - %s" % (event_type, info)
elif control == win32service.SERVICE_CONTROL_HARDWAREPROFILECHANGE:
msg = "A hardware profile changed: type=%s, data=%s" % (event_type, data)
elif control == win32service.SERVICE_CONTROL_POWEREVENT:
msg = "A power event: setting %s" % data
elif control == win32service.SERVICE_CONTROL_SESSIONCHANGE:
# data is a single elt tuple, but this could potentially grow
# in the future if the win32 struct does
msg = "Session event: type=%s, data=%s" % (event_type, data)
else:
msg = "Other event: code=%d, type=%s, data=%s" \
% (control, event_type, data)
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
0xF000, # generic message
(msg, '')
)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
# do nothing at all - just wait to be stopped
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
# Write a stop message.
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STOPPED,
(self._svc_name_, '')
)
if __name__=='__main__':
win32serviceutil.HandleCommandLine(EventDemoService)
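# HandleCommandLine wires up the standard pywin32 service command line, so
# the demo can be driven with e.g. (from an elevated prompt):
#   python serviceEvents.py install
#   python serviceEvents.py start
#   python serviceEvents.py debug    # run interactively for testing
#   python serviceEvents.py remove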
| gpl-3.0 |
jpshort/odoo | addons/pad/py_etherpad/__init__.py | 505 | 7804 | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
    # These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
    # Sessions can be created between a group and an author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
    # GROUPID$PADNAME. A security manager controls access to them, and it is
    # forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
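# Minimal usage sketch (the API key and pad id below are made up; assumes an
# Etherpad Lite server reachable at the default baseUrl):
#   client = EtherpadLiteClient(apiKey='EXAMPLEKEY')
#   client.createPad('demo', text='hello world')
#   print client.getText('demo')['text']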
| agpl-3.0 |
martynovp/edx-platform | lms/djangoapps/shoppingcart/migrations/0023_auto__add_field_coupon_expiration_date.py | 110 | 18437 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Coupon.expiration_date'
db.add_column('shoppingcart_coupon', 'expiration_date',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Coupon.expiration_date'
db.delete_column('shoppingcart_coupon', 'expiration_date')
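    # Applying/reverting this migration uses the usual South commands (the
    # previous migration number 0022 is assumed from this file's name):
    #   ./manage.py migrate shoppingcart 0023
    #   ./manage.py migrate shoppingcart 0022   # revert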
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 6, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 6, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 6, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart'] | agpl-3.0 |
synicalsyntax/zulip | zerver/webhooks/statuspage/view.py | 4 | 2145 | # Webhooks for external integrations.
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, api_key_only_webhook_view, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
INCIDENT_TEMPLATE = """
**{name}**:
* State: **{state}**
* Description: {content}
""".strip()
COMPONENT_TEMPLATE = "**{name}** has changed status from **{old_status}** to **{new_status}**."
TOPIC_TEMPLATE = '{name}: {description}'
def get_incident_events_body(payload: Dict[str, Any]) -> str:
return INCIDENT_TEMPLATE.format(
name = payload["incident"]["name"],
state = payload["incident"]["status"],
content = payload["incident"]["incident_updates"][0]["body"],
)
def get_components_update_body(payload: Dict[str, Any]) -> str:
return COMPONENT_TEMPLATE.format(
name = payload["component"]["name"],
old_status = payload["component_update"]["old_status"],
new_status = payload["component_update"]["new_status"],
)
def get_incident_topic(payload: Dict[str, Any]) -> str:
return TOPIC_TEMPLATE.format(
name = payload["incident"]["name"],
description = payload["page"]["status_description"],
)
def get_component_topic(payload: Dict[str, Any]) -> str:
return TOPIC_TEMPLATE.format(
name = payload["component"]["name"],
description = payload["page"]["status_description"],
)
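# Payload fields consumed by the helpers above (reconstructed from the key
# accesses in this file; not the full Statuspage webhook schema):
#   payload["page"]:             status_indicator, status_description
#   payload["incident"]:         name, status, incident_updates[0]["body"]
#   payload["component"]:        name
#   payload["component_update"]: old_status, new_status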
@api_key_only_webhook_view('Statuspage')
@has_request_variables
def api_statuspage_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
status = payload["page"]["status_indicator"]
if status == "none":
topic = get_incident_topic(payload)
body = get_incident_events_body(payload)
else:
topic = get_component_topic(payload)
body = get_components_update_body(payload)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
| apache-2.0 |
ntts-clo/mld-ryu | ryu/tests/mininet/l3/ip_ttl/test_ip_ttl.py | 63 | 2976 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ether
from ryu.lib.mac import haddr_to_str
LOG = logging.getLogger(__name__)
class RunTestMininet(app_manager.RyuApp):
_CONTEXTS = {'dpset': dpset.DPSet}
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTestMininet, self).__init__(*args, **kwargs)
def _add_flow(self, dp, match, actions):
inst = [dp.ofproto_parser.OFPInstructionActions(
dp.ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie=0, cookie_mask=0, table_id=0,
command=dp.ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0xff, buffer_id=0xffffffff,
out_port=dp.ofproto.OFPP_ANY, out_group=dp.ofproto.OFPG_ANY,
flags=0, match=match, instructions=inst)
dp.send_msg(mod)
def _define_flow(self, dp):
in_port = 1
out_port = 2
eth_IP = ether.ETH_TYPE_IP
        # IP (e.g. ICMP pings) -> DecNwTtl
LOG.debug("--- add_flow DecNwTtl")
match = dp.ofproto_parser.OFPMatch()
match.set_in_port(in_port)
match.set_dl_type(eth_IP)
actions = [dp.ofproto_parser.OFPActionDecNwTtl(),
dp.ofproto_parser.OFPActionOutput(out_port, 0)]
self._add_flow(dp, match, actions)
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def handler_datapath(self, ev):
if ev.enter:
self._define_flow(ev.dp)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dst, src, eth_type = struct.unpack_from('!6s6sH', buffer(msg.data), 0)
in_port = msg.match.fields[0].value
LOG.info("----------------------------------------")
LOG.info("* PacketIn")
LOG.info("in_port=%d, eth_type: %s", in_port, hex(eth_type))
LOG.info("packet reason=%d buffer_id=%d", msg.reason, msg.buffer_id)
LOG.info("packet in datapath_id=%s src=%s dst=%s",
msg.datapath.id, haddr_to_str(src), haddr_to_str(dst))
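# This app is meant to be run under the Ryu controller with the accompanying
# Mininet harness, e.g. (the path assumes the default repo layout):
#   ryu-manager --verbose ryu/tests/mininet/l3/ip_ttl/test_ip_ttl.py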
| apache-2.0 |
kybriainfotech/iSocioCRM | addons/website_forum/models/res_users.py | 281 | 5198 | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib import urlencode
import hashlib
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class Users(osv.Model):
_inherit = 'res.users'
def __init__(self, pool, cr):
init_res = super(Users, self).__init__(pool, cr)
self.SELF_WRITEABLE_FIELDS = list(
set(
self.SELF_WRITEABLE_FIELDS +
['country_id', 'city', 'website', 'website_description', 'website_published']))
return init_res
def _get_user_badge_level(self, cr, uid, ids, name, args, context=None):
"""Return total badge per level of users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool['gamification.badge.user']
for id in ids:
result[id] = {
'gold_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'gold'), ('user_id', '=', id)], context=context, count=True),
'silver_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'silver'), ('user_id', '=', id)], context=context, count=True),
'bronze_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'bronze'), ('user_id', '=', id)], context=context, count=True),
}
return result
_columns = {
'create_date': fields.datetime('Create Date', select=True, readonly=True),
'karma': fields.integer('Karma'),
'badge_ids': fields.one2many('gamification.badge.user', 'user_id', 'Badges'),
'gold_badge': fields.function(_get_user_badge_level, string="Number of gold badges", type='integer', multi='badge_level'),
'silver_badge': fields.function(_get_user_badge_level, string="Number of silver badges", type='integer', multi='badge_level'),
'bronze_badge': fields.function(_get_user_badge_level, string="Number of bronze badges", type='integer', multi='badge_level'),
}
_defaults = {
'karma': 0,
}
def _generate_forum_token(self, cr, uid, user_id, email):
"""Return a token for email validation. This token is valid for the day
and is a hash based on a (secret) uuid generated by the forum module,
the user_id, the email and currently the day (to be updated if necessary). """
forum_uuid = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'website_forum.uuid')
return hashlib.sha256('%s-%s-%s-%s' % (
datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
forum_uuid,
user_id,
email)).hexdigest()
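    # Note: because the hashed timestamp above has its time fields zeroed,
    # the token is stable for a given user/email for the whole day and
    # silently rotates at midnight.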
def send_forum_validation_email(self, cr, uid, user_id, forum_id=None, context=None):
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
token = self._generate_forum_token(cr, uid, user_id, user.email)
activation_template_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.validation_email')
if activation_template_id:
params = {
'token': token,
'id': user_id,
'email': user.email}
if forum_id:
params['forum_id'] = forum_id
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
tpl_ctx = dict(context, token_url=token_url)
self.pool['email.template'].send_mail(cr, SUPERUSER_ID, activation_template_id, user_id, force_send=True, context=tpl_ctx)
return True
def process_forum_validation_token(self, cr, uid, token, user_id, email, forum_id=None, context=None):
validation_token = self.pool['res.users']._generate_forum_token(cr, uid, user_id, email)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_id, context=context)
if token == validation_token and user.karma == 0:
karma = 3
if not forum_id:
forum_ids = self.pool['forum.forum'].search(cr, uid, [], limit=1, context=context)
if forum_ids:
forum_id = forum_ids[0]
if forum_id:
forum = self.pool['forum.forum'].browse(cr, uid, forum_id, context=context)
# karma gained: karma to ask a question and have 2 downvotes
karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
return user.write({'karma': karma})
return False
def add_karma(self, cr, uid, ids, karma, context=None):
for user in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [user.id], {'karma': user.karma + karma}, context=context)
return True
def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
if isinstance(excluded_categories, list):
if 'forum' not in excluded_categories:
excluded_categories.append('forum')
else:
excluded_categories = ['forum']
return super(Users, self).get_serialised_gamification_summary(cr, uid, excluded_categories=excluded_categories, context=context)
| agpl-3.0 |
tekapo/fabric | tests/test_network.py | 3 | 24848 | from __future__ import with_statement
from datetime import datetime
import copy
import getpass
import sys
from nose.tools import with_setup, ok_, raises
from fudge import (Fake, clear_calls, clear_expectations, patch_object, verify,
with_patched_object, patched_context, with_fakes)
from fabric.context_managers import settings, hide, show
from fabric.network import (HostConnectionCache, join_host_strings, normalize,
denormalize, key_filenames, ssh)
from fabric.io import output_loop
import fabric.network # So I can call patch_object correctly. Sigh.
from fabric.state import env, output, _get_system_username
from fabric.operations import run, sudo, prompt
from fabric.exceptions import NetworkError
from fabric.tasks import execute
from fabric.api import parallel
from fabric import utils # for patching
from utils import *
from server import (server, PORT, RESPONSES, PASSWORDS, CLIENT_PRIVKEY, USER,
CLIENT_PRIVKEY_PASSPHRASE)
#
# Subroutines, e.g. host string normalization
#
class TestNetwork(FabricTest):
def test_host_string_normalization(self):
username = _get_system_username()
for description, input, output_ in (
("Sanity check: equal strings remain equal",
'localhost', 'localhost'),
("Empty username is same as get_system_username",
'localhost', username + '@localhost'),
("Empty port is same as port 22",
'localhost', 'localhost:22'),
("Both username and port tested at once, for kicks",
'localhost', username + '@localhost:22'),
):
eq_.description = "Host-string normalization: %s" % description
yield eq_, normalize(input), normalize(output_)
del eq_.description
def test_normalization_for_ipv6(self):
"""
normalize() will accept IPv6 notation and can separate host and port
"""
username = _get_system_username()
for description, input, output_ in (
("Full IPv6 address",
'2001:DB8:0:0:0:0:0:1', (username, '2001:DB8:0:0:0:0:0:1', '22')),
("IPv6 address in short form",
'2001:DB8::1', (username, '2001:DB8::1', '22')),
("IPv6 localhost",
'::1', (username, '::1', '22')),
("Square brackets are required to separate non-standard port from IPv6 address",
'[2001:DB8::1]:1222', (username, '2001:DB8::1', '1222')),
("Username and IPv6 address",
'user@2001:DB8::1', ('user', '2001:DB8::1', '22')),
("Username and IPv6 address with non-standard port",
'user@[2001:DB8::1]:1222', ('user', '2001:DB8::1', '1222')),
):
eq_.description = "Host-string IPv6 normalization: %s" % description
yield eq_, normalize(input), output_
del eq_.description
def test_normalization_without_port(self):
"""
normalize() and join_host_strings() omit port if omit_port given
"""
eq_(
join_host_strings(*normalize('user@localhost', omit_port=True)),
'user@localhost'
)
def test_ipv6_host_strings_join(self):
"""
join_host_strings() should use square brackets only for IPv6 and if port is given
"""
eq_(
join_host_strings('user', '2001:DB8::1'),
'user@2001:DB8::1'
)
eq_(
join_host_strings('user', '2001:DB8::1', '1222'),
'user@[2001:DB8::1]:1222'
)
eq_(
join_host_strings('user', '192.168.0.0', '1222'),
            'user@192.168.0.0:1222'
)
def test_nonword_character_in_username(self):
"""
normalize() will accept non-word characters in the username part
"""
eq_(
            normalize('user-with-hyphens@somehost')[0],  # host part is irrelevant here
'user-with-hyphens'
)
def test_at_symbol_in_username(self):
"""
normalize() should allow '@' in usernames (i.e. last '@' is split char)
"""
        parts = normalize('user@example.com@www.example.com')
        eq_(parts[0], 'user@example.com')
eq_(parts[1], 'www.example.com')
def test_normalization_of_empty_input(self):
empties = ('', '', '')
for description, input in (
("empty string", ''),
("None", None)
):
template = "normalize() returns empty strings for %s input"
eq_.description = template % description
yield eq_, normalize(input), empties
del eq_.description
def test_host_string_denormalization(self):
username = _get_system_username()
for description, string1, string2 in (
("Sanity check: equal strings remain equal",
'localhost', 'localhost'),
("Empty username is same as get_system_username",
'localhost:22', username + '@localhost:22'),
("Empty port is same as port 22",
'user@localhost', 'user@localhost:22'),
("Both username and port",
'localhost', username + '@localhost:22'),
("IPv6 address",
'2001:DB8::1', username + '@[2001:DB8::1]:22'),
):
eq_.description = "Host-string denormalization: %s" % description
yield eq_, denormalize(string1), denormalize(string2)
del eq_.description
#
# Connection caching
#
@staticmethod
@with_fakes
def check_connection_calls(host_strings, num_calls):
# Clear Fudge call stack
# Patch connect() with Fake obj set to expect num_calls calls
patched_connect = patch_object('fabric.network', 'connect',
Fake('connect', expect_call=True).times_called(num_calls)
)
try:
# Make new cache object
cache = HostConnectionCache()
# Connect to all connection strings
for host_string in host_strings:
# Obtain connection from cache, potentially calling connect()
cache[host_string]
finally:
# Restore connect()
patched_connect.restore()
def test_connection_caching(self):
for description, host_strings, num_calls in (
("Two different host names, two connections",
('localhost', 'other-system'), 2),
("Same host twice, one connection",
('localhost', 'localhost'), 1),
("Same host twice, different ports, two connections",
('localhost:22', 'localhost:222'), 2),
("Same host twice, different users, two connections",
('user1@localhost', 'user2@localhost'), 2),
):
TestNetwork.check_connection_calls.description = description
yield TestNetwork.check_connection_calls, host_strings, num_calls
def test_connection_cache_deletion(self):
"""
HostConnectionCache should delete correctly w/ non-full keys
"""
hcc = HostConnectionCache()
fake = Fake('connect', callable=True)
with patched_context('fabric.network', 'connect', fake):
for host_string in ('hostname', 'user@hostname',
'user@hostname:222'):
# Prime
hcc[host_string]
# Test
ok_(host_string in hcc)
# Delete
del hcc[host_string]
# Test
ok_(host_string not in hcc)
#
# Connection loop flow
#
@server()
def test_saved_authentication_returns_client_object(self):
cache = HostConnectionCache()
assert isinstance(cache[env.host_string], ssh.SSHClient)
@server()
@with_fakes
def test_prompts_for_password_without_good_authentication(self):
env.password = None
with password_response(PASSWORDS[env.user], times_called=1):
cache = HostConnectionCache()
cache[env.host_string]
@aborts
def test_aborts_on_prompt_with_abort_on_prompt(self):
"""
abort_on_prompt=True should abort when prompt() is used
"""
env.abort_on_prompts = True
prompt("This will abort")
@server()
@aborts
def test_aborts_on_password_prompt_with_abort_on_prompt(self):
"""
abort_on_prompt=True should abort when password prompts occur
"""
env.password = None
env.abort_on_prompts = True
with password_response(PASSWORDS[env.user], times_called=1):
cache = HostConnectionCache()
cache[env.host_string]
@mock_streams('stdout')
@server()
def test_does_not_abort_with_password_and_host_with_abort_on_prompt(self):
"""
abort_on_prompt=True should not abort if no prompts are needed
"""
env.abort_on_prompts = True
env.password = PASSWORDS[env.user]
# env.host_string is automatically filled in when using server()
run("ls /simple")
@mock_streams('stdout')
@server()
def test_trailing_newline_line_drop(self):
"""
Trailing newlines shouldn't cause last line to be dropped.
"""
# Multiline output with trailing newline
cmd = "ls /"
output_string = RESPONSES[cmd]
# TODO: fix below lines, duplicates inner workings of tested code
prefix = "[%s] out: " % env.host_string
expected = prefix + ('\n' + prefix).join(output_string.split('\n'))
# Create, tie off thread
with settings(show('everything'), hide('running')):
result = run(cmd)
# Test equivalence of expected, received output
eq_(expected, sys.stdout.getvalue())
# Also test that the captured value matches, too.
eq_(output_string, result)
@server()
def test_sudo_prompt_kills_capturing(self):
"""
Sudo prompts shouldn't screw up output capturing
"""
cmd = "ls /simple"
with hide('everything'):
eq_(sudo(cmd), RESPONSES[cmd])
@server()
def test_password_memory_on_user_switch(self):
"""
Switching users mid-session should not screw up password memory
"""
def _to_user(user):
return join_host_strings(user, env.host, env.port)
user1 = 'root'
user2 = USER
with settings(hide('everything'), password=None):
# Connect as user1 (thus populating both the fallback and
# user-specific caches)
with settings(
password_response(PASSWORDS[user1]),
host_string=_to_user(user1)
):
run("ls /simple")
            # Connect as user2:
            # * First cxn attempt will use the fallback cache, which
            #   contains user1's password, and thus fail.
            # * Second cxn attempt will prompt the user, and succeed thanks
            #   to the mocked password prompt.
            # * It will NOT overwrite the fallback cache.
with settings(
password_response(PASSWORDS[user2]),
host_string=_to_user(user2)
):
# Just to trigger connection
run("ls /simple")
# * Sudo call should use cached user2 password, NOT fallback cache,
# and thus succeed. (I.e. p_f_p should NOT be called here.)
with settings(
password_response('whatever', times_called=0),
host_string=_to_user(user2)
):
sudo("ls /simple")
@mock_streams('stderr')
@server()
def test_password_prompt_displays_host_string(self):
"""
Password prompt lines should include the user/host in question
"""
env.password = None
env.no_agent = env.no_keys = True
output.everything = False
with password_response(PASSWORDS[env.user], silent=False):
run("ls /simple")
regex = r'^\[%s\] Login password for \'%s\': ' % (env.host_string, env.user)
assert_contains(regex, sys.stderr.getvalue())
@mock_streams('stderr')
@server(pubkeys=True)
def test_passphrase_prompt_displays_host_string(self):
"""
Passphrase prompt lines should include the user/host in question
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
output.everything = False
with password_response(CLIENT_PRIVKEY_PASSPHRASE, silent=False):
run("ls /simple")
regex = r'^\[%s\] Login password for \'%s\': ' % (env.host_string, env.user)
assert_contains(regex, sys.stderr.getvalue())
def test_sudo_prompt_display_passthrough(self):
"""
Sudo prompt should display (via passthrough) when stdout/stderr shown
"""
TestNetwork._prompt_display(True)
def test_sudo_prompt_display_directly(self):
"""
Sudo prompt should display (manually) when stdout/stderr hidden
"""
TestNetwork._prompt_display(False)
@staticmethod
@mock_streams('both')
@server(pubkeys=True, responses={'oneliner': 'result'})
def _prompt_display(display_output):
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
output.output = display_output
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[env.user]),
silent=False
):
sudo('oneliner')
if display_output:
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for '%(user)s':
[%(prefix)s] out: sudo password:
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result
""" % {'prefix': env.host_string, 'user': env.user}
else:
# Note lack of first sudo prompt (as it's autoresponded to) and of
# course the actual result output.
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for '%(user)s':
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password: """ % {
'prefix': env.host_string,
'user': env.user
}
eq_(expected[1:], sys.stdall.getvalue())
@mock_streams('both')
@server(
pubkeys=True,
responses={'oneliner': 'result', 'twoliner': 'result1\nresult2'}
)
def test_consecutive_sudos_should_not_have_blank_line(self):
"""
Consecutive sudo() calls should not incur a blank line in-between
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[USER]),
silent=False
):
sudo('oneliner')
sudo('twoliner')
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for '%(user)s':
[%(prefix)s] out: sudo password:
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result
[%(prefix)s] sudo: twoliner
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result1
[%(prefix)s] out: result2
""" % {'prefix': env.host_string, 'user': env.user}
eq_(sys.stdall.getvalue(), expected[1:])
@mock_streams('both')
@server(pubkeys=True, responses={'silent': '', 'normal': 'foo'})
def test_silent_commands_should_not_have_blank_line(self):
"""
Silent commands should not generate an extra trailing blank line
After the move to interactive I/O, it was noticed that while run/sudo
commands which had non-empty stdout worked normally (consecutive such
commands were totally adjacent), those with no stdout (i.e. silent
commands like ``test`` or ``mkdir``) resulted in spurious blank lines
after the "run:" line. This looks quite ugly in real world scripts.
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(CLIENT_PRIVKEY_PASSPHRASE, silent=False):
run('normal')
run('silent')
run('normal')
with hide('everything'):
run('normal')
run('silent')
expected = """
[%(prefix)s] run: normal
[%(prefix)s] Login password for '%(user)s':
[%(prefix)s] out: foo
[%(prefix)s] run: silent
[%(prefix)s] run: normal
[%(prefix)s] out: foo
""" % {'prefix': env.host_string, 'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
@mock_streams('both')
@server(
pubkeys=True,
responses={'oneliner': 'result', 'twoliner': 'result1\nresult2'}
)
    def test_io_should_print_prefix_if_output_prefix_is_true(self):
"""
run/sudo should print [host_string] if env.output_prefix == True
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[USER]),
silent=False
):
run('oneliner')
run('twoliner')
expected = """
[%(prefix)s] run: oneliner
[%(prefix)s] Login password for '%(user)s':
[%(prefix)s] out: result
[%(prefix)s] run: twoliner
[%(prefix)s] out: result1
[%(prefix)s] out: result2
""" % {'prefix': env.host_string, 'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
@mock_streams('both')
@server(
pubkeys=True,
responses={'oneliner': 'result', 'twoliner': 'result1\nresult2'}
)
    def test_io_should_not_print_prefix_if_output_prefix_is_false(self):
"""
run/sudo shouldn't print [host_string] if env.output_prefix == False
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[USER]),
silent=False
):
with settings(output_prefix=False):
run('oneliner')
run('twoliner')
expected = """
[%(prefix)s] run: oneliner
[%(prefix)s] Login password for '%(user)s':
result
[%(prefix)s] run: twoliner
result1
result2
""" % {'prefix': env.host_string, 'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
@server()
def test_env_host_set_when_host_prompt_used(self):
"""
Ensure env.host is set during host prompting
"""
copied_host_string = str(env.host_string)
fake = Fake('raw_input', callable=True).returns(copied_host_string)
env.host_string = None
env.host = None
with settings(hide('everything'), patched_input(fake)):
run("ls /")
# Ensure it did set host_string back to old value
eq_(env.host_string, copied_host_string)
# Ensure env.host is correct
eq_(env.host, normalize(copied_host_string)[1])
def subtask():
run("This should never execute")
class TestConnections(FabricTest):
@aborts
def test_should_abort_when_cannot_connect(self):
"""
By default, connecting to a nonexistent server should abort.
"""
with hide('everything'):
execute(subtask, hosts=['nope.nonexistent.com'])
def test_should_warn_when_skip_bad_hosts_is_True(self):
"""
env.skip_bad_hosts = True => execute() skips current host
"""
with settings(hide('everything'), skip_bad_hosts=True):
execute(subtask, hosts=['nope.nonexistent.com'])
@parallel
def parallel_subtask():
run("This should never execute")
class TestParallelConnections(FabricTest):
@aborts
def test_should_abort_when_cannot_connect(self):
"""
By default, connecting to a nonexistent server should abort.
"""
with hide('everything'):
execute(parallel_subtask, hosts=['nope.nonexistent.com'])
def test_should_warn_when_skip_bad_hosts_is_True(self):
"""
env.skip_bad_hosts = True => execute() skips current host
"""
with settings(hide('everything'), skip_bad_hosts=True):
execute(parallel_subtask, hosts=['nope.nonexistent.com'])
class TestSSHConfig(FabricTest):
def env_setup(self):
super(TestSSHConfig, self).env_setup()
env.use_ssh_config = True
env.ssh_config_path = support("ssh_config")
# Undo the changes FabricTest makes to env for server support
env.user = env.local_user
env.port = env.default_port
def test_global_user_with_default_env(self):
"""
Global User should override default env.user
"""
eq_(normalize("localhost")[0], "satan")
def test_global_user_with_nondefault_env(self):
"""
Global User should NOT override nondefault env.user
"""
with settings(user="foo"):
eq_(normalize("localhost")[0], "foo")
def test_specific_user_with_default_env(self):
"""
Host-specific User should override default env.user
"""
eq_(normalize("myhost")[0], "neighbor")
def test_user_vs_host_string_value(self):
"""
SSH-config derived user should NOT override host-string user value
"""
eq_(normalize("myuser@localhost")[0], "myuser")
eq_(normalize("myuser@myhost")[0], "myuser")
def test_global_port_with_default_env(self):
"""
Global Port should override default env.port
"""
eq_(normalize("localhost")[2], "666")
def test_global_port_with_nondefault_env(self):
"""
Global Port should NOT override nondefault env.port
"""
with settings(port="777"):
eq_(normalize("localhost")[2], "777")
def test_specific_port_with_default_env(self):
"""
Host-specific Port should override default env.port
"""
eq_(normalize("myhost")[2], "664")
def test_port_vs_host_string_value(self):
"""
SSH-config derived port should NOT override host-string port value
"""
eq_(normalize("localhost:123")[2], "123")
eq_(normalize("myhost:123")[2], "123")
def test_hostname_alias(self):
"""
Hostname setting overrides host string's host value
"""
eq_(normalize("localhost")[1], "localhost")
eq_(normalize("myalias")[1], "otherhost")
@with_patched_object(utils, 'warn', Fake('warn', callable=True,
expect_call=True))
def test_warns_with_bad_config_file_path(self):
# use_ssh_config is already set in our env_setup()
with settings(hide('everything'), ssh_config_path="nope_bad_lol"):
normalize('foo')
@server()
def test_real_connection(self):
"""
Test-server connection using ssh_config values
"""
with settings(
hide('everything'),
ssh_config_path=support("testserver_ssh_config"),
host_string='testserver',
):
ok_(run("ls /simple").succeeded)
class TestKeyFilenames(FabricTest):
def test_empty_everything(self):
"""
No env.key_filename and no ssh_config = empty list
"""
with settings(use_ssh_config=False):
with settings(key_filename=""):
eq_(key_filenames(), [])
with settings(key_filename=[]):
eq_(key_filenames(), [])
def test_just_env(self):
"""
Valid env.key_filename and no ssh_config = just env
"""
with settings(use_ssh_config=False):
with settings(key_filename="mykey"):
eq_(key_filenames(), ["mykey"])
with settings(key_filename=["foo", "bar"]):
eq_(key_filenames(), ["foo", "bar"])
def test_just_ssh_config(self):
"""
No env.key_filename + valid ssh_config = ssh value
"""
with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
for val in ["", []]:
with settings(key_filename=val):
eq_(key_filenames(), ["foobar.pub"])
def test_both(self):
"""
Both env.key_filename + valid ssh_config = both show up w/ env var first
"""
with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
with settings(key_filename="bizbaz.pub"):
eq_(key_filenames(), ["bizbaz.pub", "foobar.pub"])
with settings(key_filename=["bizbaz.pub", "whatever.pub"]):
expected = ["bizbaz.pub", "whatever.pub", "foobar.pub"]
eq_(key_filenames(), expected)
| bsd-2-clause |
Philippe12/external_chromium_org | ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_component_crx_gen.py | 48 | 13105 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script lays out the PNaCl translator files for a
normal Chrome installer, for one platform. Once run num-of-arches times,
the result can then be packed into a multi-CRX zip file.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import json
import logging
import optparse
import os
import platform
import re
import shutil
import sys
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also use this to identify the arch-specific versions of
# this extension.
def CanonicalArch(arch):
if arch in ('x86_64', 'x86-64', 'x64', 'amd64'):
return 'x86-64'
# TODO(jvoung): be more specific about the arm architecture version?
if arch in ('arm', 'armv7'):
return 'arm'
if re.match('^i.86$', arch) or arch in ('x86_32', 'x86-32', 'ia32', 'x86'):
return 'x86-32'
return None
def GetBuildArch():
arch = platform.machine()
return CanonicalArch(arch)
BUILD_ARCH = GetBuildArch()
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
return arch in ARCHES
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
return {'x86-32': 'i686',
'x86-64': 'x86_64',
'arm' : 'armv7'}[arch]
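# Illustrative round-trip (not part of the original script): the spellings
# 'amd64', 'x64' and 'x86_64' all canonicalize to the same name, and
# StandardArch maps it back to the configure/build.sh spelling:
#   CanonicalArch('amd64') == CanonicalArch('x64') == 'x86-64'
#   StandardArch('x86-64') == 'x86_64'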
######################################################################
def GetNaClRoot():
""" Find the native_client path, relative to this script.
This script is in ppapi/... and native_client is a sibling of ppapi.
"""
script_file = os.path.abspath(__file__)
def SearchForNaCl(cur_dir):
if cur_dir.endswith('ppapi'):
parent = os.path.dirname(cur_dir)
sibling = os.path.join(parent, 'native_client')
if not os.path.isdir(sibling):
raise Exception('Could not find native_client relative to %s' %
script_file)
return sibling
# Detect when we've hit the root (linux is /, but windows is not...)
next_dir = os.path.dirname(cur_dir)
if cur_dir == next_dir:
raise Exception('Could not find native_client relative to %s' %
script_file)
return SearchForNaCl(next_dir)
return SearchForNaCl(script_file)
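# Illustrative walk (hypothetical checkout layout): starting from
# /chromium/src/ppapi/native_client/src/untrusted/.../this_script.py,
# SearchForNaCl ascends parent directories until it reaches
# /chromium/src/ppapi, then returns the sibling /chromium/src/native_client.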
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
if sys.platform == 'darwin':
platform = 'mac'
elif sys.platform.startswith('linux'):
platform = 'linux'
elif sys.platform in ('cygwin', 'win32'):
platform = 'windows'
else:
raise Exception('Unknown platform: %s' % sys.platform)
return platform
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
arch = CanonicalArch(target_arch)
if not IsValidArch(arch):
raise Exception('Unknown target_arch %s' % target_arch)
# On windows, we need x86-32 and x86-64 (assuming non-windows RT).
if BUILD_PLATFORM == 'windows':
if arch.startswith('x86'):
return ['x86-32', 'x86-64']
else:
raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
target_arch)
else:
return [arch]
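# Illustrative results (not part of the original script):
#   on windows: DetermineInstallerArches('x64') -> ['x86-32', 'x86-64']
#   on linux:   DetermineInstallerArches('armv7') -> ['arm']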
######################################################################
class PnaclPackaging(object):
package_base = os.path.dirname(__file__)
# File paths that are set from the command line.
pnacl_template = None
tool_revisions = None
# Agreed-upon name for pnacl-specific info.
pnacl_json = 'pnacl.json'
@staticmethod
def SetPnaclInfoTemplatePath(path):
PnaclPackaging.pnacl_template = path
@staticmethod
def SetToolsRevisionPath(path):
PnaclPackaging.tool_revisions = path
@staticmethod
def PnaclToolsRevision():
with open(PnaclPackaging.tool_revisions, 'r') as f:
for line in f.read().splitlines():
if line.startswith('PNACL_VERSION'):
_, version = line.split('=')
# CWS happens to use version quads, so make it a quad too.
# However, each component of the quad is limited to 64K max, so split
# larger values across two of the quad's components.
max_version = 2 ** 16
version = int(version)
version_more = version / max_version
version = version % max_version
return '0.1.%d.%d' % (version_more, version)
raise Exception('Cannot find PNACL_VERSION in TOOL_REVISIONS file: %s' %
PnaclPackaging.tool_revisions)
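# Worked example (illustrative): with PNACL_VERSION=70000,
# version_more = 70000 / 65536 = 1 (integer division under Python 2) and
# version = 70000 % 65536 = 4464, so the method returns '0.1.1.4464'.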
@staticmethod
def GeneratePnaclInfo(target_dir, abi_version, arch):
# A note on versions: pnacl_version is the version of translator built
# by the NaCl repo, while abi_version is bumped when the NaCl sandbox
# actually changes.
pnacl_version = PnaclPackaging.PnaclToolsRevision()
with open(PnaclPackaging.pnacl_template, 'r') as pnacl_template_fd:
pnacl_template = json.load(pnacl_template_fd)
out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
None))
with open(out_name, 'w') as output_fd:
pnacl_template['pnacl-arch'] = arch
pnacl_template['pnacl-version'] = pnacl_version
json.dump(pnacl_template, output_fd, sort_keys=True, indent=4)
######################################################################
class PnaclDirs(object):
toolchain_dir = J(NACL_ROOT, 'toolchain')
output_dir = J(toolchain_dir, 'pnacl-package')
@staticmethod
def TranslatorRoot():
return J(PnaclDirs.toolchain_dir, 'pnacl_translator')
@staticmethod
def LibDir(target_arch):
return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)
@staticmethod
def SandboxedCompilerDir(target_arch):
return J(PnaclDirs.toolchain_dir,
'pnacl_translator', StandardArch(target_arch), 'bin')
@staticmethod
def SetOutputDir(d):
PnaclDirs.output_dir = d
@staticmethod
def OutputDir():
return PnaclDirs.output_dir
@staticmethod
def OutputAllDir(version_quad):
return J(PnaclDirs.OutputDir(), version_quad)
@staticmethod
def OutputArchBase(arch):
return '%s' % arch
@staticmethod
def OutputArchDir(arch):
# Nest this in another directory so that the layout will be the same
# as the "all"/universal version.
parent_dir = J(PnaclDirs.OutputDir(), PnaclDirs.OutputArchBase(arch))
return (parent_dir, J(parent_dir, PnaclDirs.OutputArchBase(arch)))
######################################################################
def StepBanner(short_desc, long_desc):
logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
out_dir = PnaclDirs.OutputDir()
StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
else:
logging.info('Clean skipped -- no previous output directory!')
######################################################################
def UseWhitelistedChars(orig_basename, arch):
""" Make the filename match the pattern expected by nacl_file_host.
Currently, this assumes there is prefix "pnacl_public_" and
that the allowed chars are in the set [a-zA-Z0-9_].
"""
if arch:
target_basename = 'pnacl_public_%s_%s' % (arch, orig_basename)
else:
target_basename = 'pnacl_public_%s' % orig_basename
result = re.sub(r'[^a-zA-Z0-9_]', '_', target_basename)
logging.info('UseWhitelistedChars using: %s' % result)
return result
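# Worked example (illustrative):
#   UseWhitelistedChars('llc.nexe', 'x86-64')
# builds 'pnacl_public_x86-64_llc.nexe' and then substitutes the disallowed
# '-' and '.' characters, returning 'pnacl_public_x86_64_llc_nexe'.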
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
""" Copy files from src_dir to dest_dir.
When copying, also rename the files such that they match the white-listing
pattern in chrome/browser/nacl_host/nacl_file_host.cc.
"""
for (root, dirs, files) in os.walk(src_dir, followlinks=True):
for f in files:
# Assume a flat directory.
assert (f == os.path.basename(f))
full_name = J(root, f)
target_name = UseWhitelistedChars(f, arch)
shutil.copy(full_name, J(dest_dir, target_name))
def BuildArchForInstaller(version_quad, arch, lib_overrides):
""" Build an architecture specific version for the chrome installer.
"""
target_dir = PnaclDirs.OutputDir()
StepBanner('BUILD INSTALLER',
'Packaging for arch %s in %s' % (arch, target_dir))
# Copy llc.nexe and ld.nexe, but with some renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.SandboxedCompilerDir(arch),
arch,
target_dir)
# Copy native libraries, also with renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.LibDir(arch), arch, target_dir)
# Also copy files from the list of overrides.
# This needs the arch tagged onto the name too, like the other files.
if arch in lib_overrides:
for override in lib_overrides[arch]:
override_base = os.path.basename(override)
target_name = UseWhitelistedChars(override_base, arch)
shutil.copy(override, J(target_dir, target_name))
def BuildInstallerStyle(version_quad, lib_overrides, arches):
""" Package the pnacl component for use within the chrome installer
infrastructure. These files need to be named in a special way
so that white-listing of files is easy.
"""
StepBanner("BUILD_ALL", "Packaging installer for version: %s" % version_quad)
for arch in arches:
BuildArchForInstaller(version_quad, arch, lib_overrides)
# Generate pnacl info manifest.
# Hack around the fact that there may be more than one arch, on Windows.
if len(arches) == 1:
arches = arches[0]
PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad, arches)
######################################################################
def Main():
usage = 'usage: %prog [options] version_arg'
parser = optparse.OptionParser(usage)
# We may want to accept a target directory to dump it in the usual
# output directory (e.g., scons-out).
parser.add_option('-c', '--clean', dest='clean',
action='store_true', default=False,
help='Clean out destination directory first.')
parser.add_option('-d', '--dest', dest='dest',
help='The destination root for laying out the extension')
parser.add_option('-L', '--lib_override',
dest='lib_overrides', action='append', default=[],
help='Specify path to a fresher native library ' +
'that overrides the tarball library, given as ' +
'an (arch,libfile) pair.')
parser.add_option('-t', '--target_arch',
dest='target_arch', default=None,
help='Only generate the chrome installer version for arch')
parser.add_option('--info_template_path',
dest='info_template_path', default=None,
help='Path of the info template file')
parser.add_option('--tool_revisions_path', dest='tool_revisions_path',
default=None, help='Location of NaCl TOOL_REVISIONS file.')
parser.add_option('-v', '--verbose', dest='verbose', default=False,
action='store_true',
help='Print verbose debug messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
% (options, args))
# Set destination directory before doing any cleaning, etc.
if options.dest:
PnaclDirs.SetOutputDir(options.dest)
if options.clean:
Clean()
if options.info_template_path:
PnaclPackaging.SetPnaclInfoTemplatePath(options.info_template_path)
if options.tool_revisions_path:
PnaclPackaging.SetToolsRevisionPath(options.tool_revisions_path)
lib_overrides = {}
for o in options.lib_overrides:
arch, override_lib = o.split(',')
arch = CanonicalArch(arch)
if not IsValidArch(arch):
raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
if not os.path.isfile(override_lib):
raise Exception('Override native lib not a file for -L: %s (from %s)' %
(override_lib, o))
override_list = lib_overrides.get(arch, [])
override_list.append(override_lib)
lib_overrides[arch] = override_list
if len(args) != 1:
parser.print_help()
parser.error('Incorrect number of arguments')
abi_version = int(args[0])
arches = DetermineInstallerArches(options.target_arch)
BuildInstallerStyle(abi_version, lib_overrides, arches)
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
beatle/node-gyp | gyp/buildbot/buildbot_run.py | 270 | 8338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
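# For the 'ninja' format on linux, the assembled command looks roughly like
# (illustrative; actual paths depend on the checkout):
#   python gyp/gyptest.py --all --passed --format ninja \
#       --path <ROOT_DIR>/cmake/bin --chdir gyp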
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| mit |
maciejkula/glove-python | setup.py | 1 | 4688 | import glob
import os
import platform
import subprocess
import sys
from setuptools import Command, Extension, setup, find_packages
from setuptools.command.test import test as TestCommand
def define_extensions(cythonize=False):
compile_args = ['-fopenmp',
'-ffast-math']
# There are problems with illegal ASM instructions
# when using the Anaconda distribution (at least on OSX).
# This could be because Anaconda uses its own assembler?
# To work around this we do not add -march=native if we
# know we're dealing with Anaconda
if 'anaconda' not in sys.version.lower():
compile_args.append('-march=native')
if cythonize:
glove_cython = "glove/glove_cython.pyx"
glove_metrics = "glove/metrics/accuracy_cython.pyx"
glove_corpus = "glove/corpus_cython.pyx"
else:
glove_cython = "glove/glove_cython.c"
glove_metrics = "glove/metrics/accuracy_cython.c"
glove_corpus = "glove/corpus_cython.cpp"
return [Extension("glove.glove_cython", [glove_cython],
extra_link_args=["-fopenmp"],
extra_compile_args=compile_args),
Extension("glove.metrics.accuracy_cython",
[glove_metrics],
extra_link_args=["-fopenmp"],
extra_compile_args=compile_args),
Extension("glove.corpus_cython", [glove_corpus],
language='c++',  # lowercase so distutils selects the C++ linker
libraries=["stdc++"],
extra_link_args=compile_args,
extra_compile_args=compile_args)]
def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
# For macports and homebrew
patterns = ['/opt/local/bin/gcc-mp-[0-9].[0-9]',
'/opt/local/bin/gcc-mp-[0-9]',
'/usr/local/bin/gcc-[0-9].[0-9]',
'/usr/local/bin/gcc-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.')
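# Illustrative match (hypothetical paths): with /usr/local/bin/gcc-8 and
# /usr/local/bin/gcc-9 both present, the sorted glob results end with
# gcc-9, so os.environ['CC'] is set to 'gcc-9'.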
class Cythonize(Command):
"""
Compile the extension .pyx files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import Cython
from Cython.Build import cythonize
cythonize(define_extensions(cythonize=True))
class Clean(Command):
"""
Clean build files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pth = os.path.dirname(os.path.abspath(__file__))
subprocess.call(['rm', '-rf', os.path.join(pth, 'build')])
subprocess.call(['rm', '-rf', os.path.join(pth, '*.egg-info')])
subprocess.call(['find', pth, '-name', '*.pyc', '-type', 'f', '-delete'])
subprocess.call(['rm', os.path.join(pth, 'glove', 'corpus_cython.so')])
subprocess.call(['rm', os.path.join(pth, 'glove', 'glove_cython.so')])
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['tests/']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='glove_python',
version='0.1.0',
description=('Python implementation of Global Vectors '
'for Word Representation (GloVe)'),
long_description='',
packages=find_packages(),
install_requires=['numpy', 'scipy'],
tests_require=['pytest'],
cmdclass={'test': PyTest, 'cythonize': Cythonize, 'clean': Clean},
author='Maciej Kula',
url='https://github.com/maciejkula/glove-python',
download_url='https://github.com/maciejkula/glove-python/tarball/0.1.0',
license='Apache 2.0',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
ext_modules=define_extensions()
)
| apache-2.0 |
xhat/micropython | tests/basics/for_range.py | 19 | 1116 | # test for+range, mostly to check optimisation of this pair
# apply args using *
for x in range(*(1, 3)):
print(x)
for x in range(1, *(6, 2)):
print(x)
# apply args using **
try:
for x in range(**{'end':1}):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, **{'end':1}):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, **{'step':1}):
print(x)
except TypeError:
print('TypeError')
# keyword args
try:
for x in range(end=1):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, end=1):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, step=1):
print(x)
except TypeError:
print('TypeError')
# argument is a comprehension
try:
for x in range(0 for i in []):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, (0 for i in [])):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, (0 for i in [])):
print(x)
except TypeError:
print('TypeError')
| mit |
bennyrowland/pyflo-suspect | tests/test_processing.py | 1 | 5347 | from unittest.mock import Mock
import numpy
import pytest
import pyflo_suspect.processing as p
import pyflo.ports
import suspect
@pytest.fixture
def simple_data():
source_array = numpy.ones((4, 128), 'complex')
source_array[1, :] *= 2
source_array[2, :] *= 4
source_array[3, :] *= 8
data = suspect.MRSData(source_array, 5e-4, 123)
return data
@pytest.fixture
def complex_data():
source_array = numpy.ones((8, 4, 128), 'complex')
data = suspect.MRSData(source_array, 5e-4, 123)
return data
def test_channel_weights_no_axis(simple_data):
component = p.SVDChannelWeights({})
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
#target_port = Mock()
#print(component.outports)
#component.outports["out"].connect(target_port)
data_port.send_data(simple_data)
#print(target_port.call_args)
def test_channel_weights_data(simple_data):
component = p.SVDChannelWeights({})
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
target_port = pyflo.ports.Inport({"name": "result"})
mock = Mock()
target_port.on('data', mock)
component.outports["weights"].connect(target_port)
data_port.send_data(simple_data)
result = mock.call_args[0][0]
assert result.shape == (4,)
numpy.testing.assert_almost_equal(result[0] / result[1], simple_data[0, 0] / simple_data[1, 0])
def test_average_data_only(simple_data):
component = p.WeightedAverage({})
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
target_port = pyflo.ports.Inport({"name": "result"})
mock = Mock()
target_port.on('data', mock)
component.outports["out"].connect(target_port)
data_port.send_data(simple_data)
result = mock.call_args[0][0]
assert result.shape == (128,)
assert result[0] == 3.75
assert result.dt == 5e-4
assert result.f0 == 123
def test_average_data_weights(simple_data):
component = p.WeightedAverage({})
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
weights_port = pyflo.ports.Outport({"name": "weights"})
weights_port.connect(component.inports["weights"])
target_port = pyflo.ports.Inport({"name": "result"})
mock = Mock()
target_port.on("data", mock)
component.outports["out"].connect(target_port)
data_port.send_data(simple_data)
# component should wait for weights to be sent
mock.assert_not_called()
weights_port.send_data(numpy.array([0, 0, 0, 1]))
result = mock.call_args[0][0]
assert result.shape == (128,)
assert result[0] == 8
assert result.dt == 5e-4
assert result.f0 == 123
def test_average_data_axis(complex_data):
component = p.WeightedAverage({})
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
axis_port = pyflo.ports.Outport({"name": "axis"})
axis_port.connect(component.inports["axis"])
target_port = pyflo.ports.Inport({"name": "result"})
mock = Mock()
target_port.on("data", mock)
component.outports["out"].connect(target_port)
data_port.send_data(complex_data)
# component should wait for the axis to be sent
mock.assert_not_called()
axis_port.send_data(1)
result = mock.call_args[0][0]
assert result.shape == (8, 128)
assert result[0, 0] == 1
assert result.dt == 5e-4
assert result.f0 == 123
def test_residual_water_alignment():
component = p.WaterPeakAlignment(None)
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
test_spectrum = numpy.zeros(128, 'complex')
test_spectrum[16] = 1
test_fid = numpy.fft.ifft(test_spectrum)
test_data = suspect.MRSData(test_fid, 1.0 / 128, 123)
target_port = pyflo.ports.Inport({"name": "target"})
component.outports["shift"].connect(target_port)
mock = Mock()
target_port.on('data', mock)
data_port.send_data(test_data)
mock.assert_called_once_with(16)
def test_frequency_shift():
component = p.FrequencyShift(None)
data_port = pyflo.ports.Outport({"name": "data"})
data_port.connect(component.inports["in"])
shift_port = pyflo.ports.Outport({"name": "shift"})
shift_port.connect(component.inports["shift"])
test_spectrum = numpy.zeros(128, 'complex')
test_spectrum[0] = 1
target_fid = numpy.fft.ifft(test_spectrum)
target_data = suspect.MRSData(target_fid, 1.0 / 128, 123)
shifted_spectrum = numpy.roll(test_spectrum, 16)
shifted_fid = numpy.fft.ifft(shifted_spectrum)
shifted_data = suspect.MRSData(shifted_fid, 1.0 / 128, 123)
target_port = pyflo.ports.Inport({"name": "result"})
component.outports["out"].connect(target_port)
mock = Mock()
target_port.on('data', mock)
data_port.send_data(shifted_data)
mock.assert_not_called()
shift_port.send_data(-16.0)
numpy.testing.assert_almost_equal(target_data, mock.call_args[0][0])
# try sending the data the other way
mock = Mock()
target_port.on('data', mock)
shift_port.send_data(-16.0)
mock.assert_not_called()
data_port.send_data(shifted_data)
numpy.testing.assert_almost_equal(target_data, mock.call_args[0][0])
| mit |
40223234/40223234 | static/Brython3.1.1-20150328-091302/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
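# Illustrative use (doctest-style, not part of the original module):
#   >>> opts, args = getopt(['-a', '--beta', 'arg1'], 'a', ['beta'])
#   >>> opts
#   [('-a', ''), ('--beta', '')]
#   >>> args
#   ['arg1']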
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError(_('option --%s requires argument') % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError(_('option --%s must not have an argument') % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError(_('option --%s not recognized') % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
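# Illustrative prefix resolution (not part of the original module):
#   long_has_args('al', ['alpha=', 'beta']) returns (True, 'alpha'),
# since 'al' is an unambiguous prefix of 'alpha=', which takes an argument.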
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError(_('option -%s requires argument') % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/docutils/__init__.py | 1 | 7559 | # $Id: __init__.py 7984 2016-12-09 09:48:27Z grubert $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This is the Docutils (Python Documentation Utilities) package.
Package Structure
=================
Modules:
- __init__.py: Contains component base classes, exception classes, and
Docutils version information.
- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
functions.
- frontend.py: Runtime settings (command-line interface, configuration files)
processing, for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
- nodes.py: Docutils document tree (doctree) node class library.
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
Subpackages:
- languages: Language-specific mappings of terms.
- parsers: Syntax-specific input parser modules or packages.
- readers: Context-specific input handlers which understand the data
source and manage a parser.
- transforms: Modules used by readers and writers to modify DPS
doctrees.
- utils: Contains the ``Reporter`` system warning class and miscellaneous
utilities used by readers, writers, and transforms.
utils/urischemes.py: Contains a complete mapping of known URI addressing
scheme names to descriptions.
- utils/math: Contains functions for conversion of mathematical notation
between different formats (LaTeX, MathML, text, ...).
- writers: Format-specific output translators.
"""
__docformat__ = 'reStructuredText'
__version__ = '0.13.1'
"""``major.minor.micro`` version number. The micro number is bumped for API
changes, for new functionality, and for interim project releases. The minor
number is bumped whenever there is a significant project release. The major
number will be bumped when the project is feature-complete, and perhaps if
there is a major change in the design."""
__version_details__ = 'release'
"""Extra version details (e.g. 'snapshot 2005-05-29, r3410', 'repository',
'release'), modified automatically & manually."""
import sys
class ApplicationError(Exception):
# Workaround:
# In Python < 2.6, unicode(<exception instance>) calls `str` on the
# arg and therefore, e.g., unicode(StandardError(u'\u234')) fails
# with UnicodeDecodeError.
if sys.version_info < (2,6):
def __unicode__(self):
return ', '.join(self.args)
class DataError(ApplicationError): pass
class SettingsSpec:
"""
Runtime setting specification base class.
SettingsSpec subclass objects used by `docutils.frontend.OptionParser`.
"""
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
Defines runtime settings and associated command-line options, as used by
`docutils.frontend.OptionParser`. This is a tuple of:
- Option group title (string or `None` which implies no group, just a list
of single options).
- Description (string or `None`).
- A sequence of option tuples. Each consists of:
- Help text (string)
- List of option strings (e.g. ``['-Q', '--quux']``).
- Dictionary of keyword arguments sent to the OptionParser/OptionGroup
``add_option`` method.
Runtime setting names are derived implicitly from long option names
('--a-setting' becomes ``settings.a_setting``) or explicitly from the
'dest' keyword argument.
Most settings will also have a 'validator' keyword & function. The
validator function validates setting values (from configuration files
and command-line option arguments) and converts them to appropriate
types. For example, the ``docutils.frontend.validate_boolean``
function, **required by all boolean settings**, converts true values
('1', 'on', 'yes', and 'true') to 1 and false values ('0', 'off',
'no', 'false', and '') to 0. Validators need only be set once per
setting. See the `docutils.frontend.validate_*` functions.
See the optparse docs for more details.
- More triples of group title, description, options, as many times as
needed. Thus, `settings_spec` tuples can be simply concatenated.
"""
settings_defaults = None
"""A dictionary of defaults for settings not in `settings_spec` (internal
settings, intended to be inaccessible by command-line and config file).
Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
defined in other components. Override in subclasses."""
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
Settings listed here are to be interpreted relative to the current working
directory."""
config_section = None
"""The name of the config file section specific to this component
(lowercase, no brackets). Override in subclasses."""
config_section_dependencies = None
"""A list of names of config file sections that are to be applied before
`config_section`, in order (from general to specific). In other words,
the settings in `config_section` are to be overlaid on top of the settings
from these sections. The "general" section is assumed implicitly.
Override in subclasses."""
class TransformSpec:
"""
Runtime transform specification base class.
TransformSpec subclass objects used by `docutils.transforms.Transformer`.
"""
def get_transforms(self):
"""Transforms required by this class. Override in subclasses."""
if self.default_transforms != ():
import warnings
warnings.warn('default_transforms attribute deprecated.\n'
'Use get_transforms() method instead.',
DeprecationWarning)
return list(self.default_transforms)
return []
# Deprecated; for compatibility.
default_transforms = ()
unknown_reference_resolvers = ()
"""List of functions to try to resolve unknown references. Unknown
references have a 'refname' attribute which doesn't correspond to any
target in the document. Called when the transforms in
`docutils.tranforms.references` are unable to find a correct target. The
list should contain functions which will try to resolve unknown
references, with the following signature::
def reference_resolver(node):
'''Returns boolean: true if resolved, false if not.'''
If the function is able to resolve the reference, it should also remove
the 'refname' attribute and mark the node as resolved::
del node['refname']
node.resolved = 1
Each function must have a "priority" attribute which will affect the order
the unknown_reference_resolvers are run::
reference_resolver.priority = 100
Override in subclasses."""
class Component(SettingsSpec, TransformSpec):
"""Base class for Docutils components."""
component_type = None
"""Name of the component type ('reader', 'parser', 'writer'). Override in
subclasses."""
supported = ()
"""Names for this component. Override in subclasses."""
def supports(self, format):
"""
Is `format` supported by this component?
To be used by transforms to ask the dependent component if it supports
a certain input context or output format.
"""
return format in self.supported
| gpl-3.0 |
Jet-Streaming/googletest | googlemock/scripts/generator/cpp/ast.py | 384 | 62773 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = '[email protected] (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
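# Illustrative combination (not part of the original): a declaration like
# "virtual void f() const = 0" carries modifiers
# FUNCTION_CONST | FUNCTION_VIRTUAL | FUNCTION_PURE_VIRTUAL == 0x07.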
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self, key: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
if name_tokens:
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
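# Illustrative trace (not part of the original): for tokens spelling
# "Bar<x, y>", the loop gathers the name 'Bar', recurses on the template
# payload to build [Type('x'), Type('y')], and AddType records a single
# Type named 'Bar' with those templated_types.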
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter(end):
if default:
del default[0] # Remove flag.
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter(s.start)
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter(tokens[-1].end)
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
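    # Example (added): with names == ['Map', '<', 'K', ',', 'V', '>'] this
    # returns (1, 6), so names[1:6] spans '<' through '>' inclusive.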
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
    # TODO(nnorwitz): remove _IgnoreUpTo(); it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
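    # Illustrative note (editor's addition): fed the tokens for
    # "std::pair<int, int> value", GetName() collects everything through the
    # closing '>' and returns the 'value' token as next_token, since two
    # NAME tokens in a row terminate the identifier.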
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
if token.name == 'default' or token.name == 'delete':
# Ignore explicitly defaulted and deleted special members
# in C++11.
token = self._GetNextToken()
else:
# Handle pure-virtual declarations.
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
        # TODO(nnorwitz): if there is only one name, like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
        # value. This is required because GetName has to have some
# terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
        # Special case the handling of typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
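    # Example of the returned mapping (editor's sketch): for
    # "template <typename T, typename U = int>" this should yield roughly
    # {'T': (None, None), 'U': (None, <name tokens for 'int'>)} -- the
    # second tuple slot carries the parsed default, when present.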
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
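# Example usage (editor's addition; the file name is illustrative):
#   builder = BuilderFromSource(open('foo.h').read(), 'foo.h')
#   for node in builder.Generate():
#       print(node.name)
# Generate() yields AST nodes (Class, Function, Typedef, ...) lazily.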
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
norbertspiess/google-python-exercises | basic/solution/list1.py | 209 | 3656 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
# +++your code here+++
# LAB(begin solution)
count = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
count = count + 1
return count
# LAB(replace solution)
# return
# LAB(end solution)
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
# +++your code here+++
# LAB(begin solution)
# Put each word into the x_list or the other_list.
x_list = []
other_list = []
for w in words:
if w.startswith('x'):
x_list.append(w)
else:
other_list.append(w)
return sorted(x_list) + sorted(other_list)
# LAB(replace solution)
# return
# LAB(end solution)
# LAB(begin solution)
# Extract the last element from a tuple -- used for custom sorting below.
def last(a):
return a[-1]
# LAB(end solution)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
# +++your code here+++
# LAB(begin solution)
return sorted(tuples, key=last)
# LAB(replace solution)
# return
# LAB(end solution)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py | 14 | 7349 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
def _random_pd_matrix(self, *shape):
mat = rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
return math_ops.matmul(chol, chol, adjoint_b=True).eval()
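  # Note added for clarity: softplus keeps the diagonal strictly positive and
  # matrix_band_part zeroes out the upper triangle, so chol is a valid
  # Cholesky factor and chol * chol^T is symmetric positive definite.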
def testRaisesIfInitializedWithNonSymmetricMatrix(self):
with self.test_session():
mu = [1., 2.]
sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
with self.assertRaisesOpError("not symmetric"):
mvn.covariance().eval()
def testNamePropertyIsSetByInitArg(self):
with self.test_session():
mu = [1., 2.]
sigma = [[1., 0.], [0., 1.]]
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy")
self.assertEqual(mvn.name, "Billy/")
def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
with self.test_session():
mu = rng.rand(10)
sigma = self._random_pd_matrix(10, 10)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
# Should not raise
mvn.covariance().eval()
def testLogPDFScalarBatch(self):
with self.test_session():
mu = rng.rand(2)
sigma = self._random_pd_matrix(2, 2)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFScalarBatchCovarianceNotProvided(self):
with self.test_session():
mu = rng.rand(2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance_matrix=None, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
# Initialize a scipy_mvn with the default covariance.
scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testShapes(self):
with self.test_session():
mu = rng.rand(3, 5, 2)
covariance = self._random_pd_matrix(3, 5, 2, 2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
    # This ensures sigma is positive definite.
mat_shape = batch_shape + event_shape + event_shape
mat = rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = rng.randn(*mu_shape)
return mu, sigma
def testKLBatch(self):
batch_shape = [2]
event_shape = [3]
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLBatchBroadcast(self):
batch_shape = [2]
event_shape = [3]
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
# No batch shape.
mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b, sigma_b)
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b, sigma_b)
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
| apache-2.0 |
CodeWingX/yowsup | yowsup/layers/axolotl/protocolentities/receipt_outgoing_retry.py | 35 | 2457 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.axolotl.protocolentities.iq_keys_get_result import ResultGetKeysIqProtocolEntity
class RetryOutgoingReceiptProtocolEntity(OutgoingReceiptProtocolEntity):
'''
<receipt type="retry" to="[email protected]" id="1415389947-12" t="1432833777">
<retry count="1" t="1432833266" id="1415389947-12" v="1">
</retry>
<registration>
HEX:xxxxxxxxx
</registration>
</receipt>
'''
def __init__(self, _id, to, t, v = "1", count = "1",regData = ""):
super(RetryOutgoingReceiptProtocolEntity, self).__init__(_id,to)
self.setRetryData(t,v,count,regData)
def setRetryData(self, t,v,count,regData):
self.t = int(t)
self.v = int(v)
self.count = int(count)
self.regData = regData
def setRegData(self,regData):
'''
In axolotl layer:
regData = self.store.getLocalRegistrationId()
'''
self.regData = ResultGetKeysIqProtocolEntity._intToBytes(regData)
def toProtocolTreeNode(self):
node = super(RetryOutgoingReceiptProtocolEntity, self).toProtocolTreeNode()
node.setAttribute("type", "retry")
retry = ProtocolTreeNode("retry", {"count": str(self.count),"t":str(self.t),"id":self.getId(),"v":str(self.v)})
node.addChild(retry)
registration = ProtocolTreeNode("registration",data=self.regData)
node.addChild(registration)
return node
def __str__(self):
out = super(RetryOutgoingReceiptProtocolEntity, self).__str__()
return out
@staticmethod
def fromProtocolTreeNode(node):
entity = OutgoingReceiptProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = RetryOutgoingReceiptProtocolEntity
retryNode = node.getChild("retry")
entity.setRetryData(retryNode["t"], retryNode["v"], retryNode["count"], node.getChild("registration").data)
@staticmethod
def fromMesageNode(MessageNodeToBeRetried):
return RetryOutgoingReceiptProtocolEntity(
MessageNodeToBeRetried.getAttributeValue("id"),
MessageNodeToBeRetried.getAttributeValue("from"),
MessageNodeToBeRetried.getAttributeValue("t"),
MessageNodeToBeRetried.getChild("enc").getAttributeValue("v")
) | gpl-3.0 |
jspan/Open-Knesset | accounts/migrations/0002_add_valid_email_group.py | 11 | 4805 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.models import User,Group,Permission
class Migration(DataMigration):
def forwards(self, orm):
(g,created) = Group.objects.get_or_create(name='Valid Email')
if created:
g.save()
p = Permission.objects.get(name='Can add comment')
g.permissions.add(p)
g.permissions.add(Permission.objects.get(name='Can add annotation'))
for u in User.objects.all():
if p in u.user_permissions.all():
u.groups.add(g)
u.user_permissions.remove(p)
print "user %s: permission->group" % u.username
def backwards(self, orm):
p = Permission.objects.get(name='Can add comment')
g = Group.objects.get(name='Valid Email')
for u in User.objects.all():
if g in u.groups.all():
print "user %s: group->permission" % u.username
u.user_permissions.add(p)
u.groups.remove(g)
g.delete()
models = {
'accounts.emailvalidation': {
'Meta': {'object_name': 'EmailValidation'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'date_requested': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
| bsd-3-clause |
vyvojer/ploev | tests/test_calc.py | 1 | 3140 | import unittest
from ploev import calc
from ploev.calc import Calc, GameCalc
from ploev.ppt import OddsOracle
class CalcModuleTest(unittest.TestCase):
def test_close_parenthesis(self):
self.assertEqual(calc.close_parenthesis('77,KK'), '(77,KK)')
self.assertEqual(calc.close_parenthesis('(77,KK)'), '(77,KK)')
self.assertEqual(calc.close_parenthesis('(77,KK):(ss)'), '((77,KK):(ss))')
def test_create_cumulative_ranges(self):
ranges = [
'(77,KK)',
'(74,K4,K7,44,77,KK)',
'*',
]
expected = [
'(77,KK)',
'(74,K4,K7,44,77,KK)!(77,KK)',
'*!(74,K4,K7,44,77,KK)!(77,KK)',
]
cumulative_ranges = calc.create_cumulative_ranges(ranges)
self.assertEqual(cumulative_ranges, expected)
class CalcTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
odds_oracle = OddsOracle()
odds_oracle.trials = 100000
odds_oracle.seconds = 1
cls.odds_oracle = odds_oracle
cls.calc = Calc(odds_oracle)
def test_range_distribution(self):
main_range = '75%'
board = '7c Kh 4s'
sub_ranges = [
'77,KK',
'74,K4,K7,44,77,KK',
'*'
]
hero = '8c4h6s4c'
rd = self.calc.range_distribution(main_range, sub_ranges, board, players=[hero])
        self.assertAlmostEqual(rd[0].fraction, 0.041, delta=0.01)
self.assertAlmostEqual(rd[1].fraction, 0.0733, delta=0.01)
self.assertAlmostEqual(rd[2].fraction, 0.885, delta=0.01)
        self.assertAlmostEqual(rd[0].equity, 0.23, delta=0.02)
self.assertAlmostEqual(rd[1].equity, 0.79, delta=0.02)
self.assertAlmostEqual(rd[2].equity, 0.88, delta=0.02)
def test_range_distribution_no_cumulative(self):
main_range = '75%'
board = '7c Kh 4s'
sub_ranges = [
'77,KK',
'74,K4,K7,44,77,KK',
'*'
]
players = ['8c4h6s4c']
rd = self.calc.range_distribution(main_range, sub_ranges, board, players, cumulative=False)
        self.assertAlmostEqual(rd[0].fraction, 0.041, delta=0.01)
self.assertAlmostEqual(rd[1].fraction, 0.115, delta=0.01)
self.assertAlmostEqual(rd[2].fraction, 1, delta=0.01)
        self.assertAlmostEqual(rd[0].equity, 0.23, delta=0.02)
self.assertAlmostEqual(rd[1].equity, 0.59, delta=0.02)
self.assertAlmostEqual(rd[2].equity, 0.84, delta=0.02)
def test_equity(self):
        players = ['3s4s5d6d', '10%', 'AA']
equities = self.calc.equity(players)
self.assertAlmostEqual(equities[0], 0.34, delta=0.01)
self.assertAlmostEqual(equities[1], 0.30, delta=0.01)
self.assertAlmostEqual(equities[2], 0.36, delta=0.01)
equity = self.calc.equity(players, hero_only=True)
self.assertAlmostEqual(equity, 0.34, delta=0.01)
players = ['As2sTc7h', '60%!$3b10i']
board = 'Ks3s9d'
equity = self.calc.equity(players, board=board, hero_only=True)
self.assertAlmostEqual(equity, 0.52, delta=0.01)
| gpl-3.0 |
SoftwareDefinedBuildings/smap | python/smap/drivers/labjack/labjackpython/u6.py | 6 | 83591 | """
Name: u6.py
Desc: Defines the U6 class, which makes working with a U6 much easier. All of
the low-level functions for the U6 are implemented as functions of the U6
class. There are also a handful additional functions which improve upon
the interface provided by the low-level functions.
To learn about the low-level functions, please see Section 5.2 of the U6 User's Guide:
http://labjack.com/support/u6/users-guide/5.2
"""
from LabJackPython import *
import struct, ConfigParser
def openAllU6():
"""
    A helpful function which will open all the connected U6s. Returns a
    dictionary where the keys are the serial numbers and the values are the
    device objects.
"""
returnDict = dict()
for i in range(deviceCount(6)):
d = U6(firstFound = False, devNumber = i+1)
returnDict[str(d.serialNumber)] = d
return returnDict
def dumpPacket(buffer):
"""
Name: dumpPacket(buffer)
Args: byte array
Desc: Returns hex value of all bytes in the buffer
"""
return repr([ hex(x) for x in buffer ])
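# Example (editor's addition):
#   >>> dumpPacket([0xF8, 0x05, 0x0B])
#   "['0xf8', '0x5', '0xb']"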
def getBit(n, bit):
"""
Name: getBit(n, bit)
Args: n, the original integer you want the bit of
bit, the index of the bit you want
Desc: Returns the bit at position "bit" of integer "n"
>>> n = 5
>>> bit = 2
>>> getBit(n, bit)
1
>>> bit = 0
>>> getBit(n, bit)
1
"""
return int(bool((int(n) & (1 << bit)) >> bit))
def toBitList(inbyte):
"""
Name: toBitList(inbyte)
Args: a byte
Desc: Converts a byte into list for access to individual bits
>>> inbyte = 5
>>> toBitList(inbyte)
[1, 0, 1, 0, 0, 0, 0, 0]
"""
return [ getBit(inbyte, b) for b in range(8) ]
def dictAsString(d):
"""Helper function that returns a string representation of a dictionary"""
s = "{"
for key, val in sorted(d.items()):
s += "%s: %s, " % (key, val)
s = s.rstrip(", ") # Nuke the trailing comma
s += "}"
return s
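# Example (editor's addition; keys come out in sorted order):
#   >>> dictAsString({'b': 2, 'a': 1})
#   '{a: 1, b: 2}'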
class CalibrationInfo(object):
""" A class to hold the calibration info for a U6 """
def __init__(self):
# A flag to tell difference between nominal and actual values.
self.nominal = True
# Positive Channel calibration
self.ain10vSlope = 3.1580578 * (10 ** -4)
self.ain10vOffset = -10.5869565220
self.ain1vSlope = 3.1580578 * (10 ** -5)
self.ain1vOffset = -1.05869565220
self.ain100mvSlope = 3.1580578 * (10 ** -6)
self.ain100mvOffset = -0.105869565220
self.ain10mvSlope = 3.1580578 * (10 ** -7)
self.ain10mvOffset = -0.0105869565220
self.ainSlope = [self.ain10vSlope, self.ain1vSlope, self.ain100mvSlope, self.ain10mvSlope]
self.ainOffset = [ self.ain10vOffset, self.ain1vOffset, self.ain100mvOffset, self.ain10mvOffset ]
# Negative Channel calibration
self.ain10vNegSlope = -3.15805800 * (10 ** -4)
self.ain10vCenter = 33523.0
self.ain1vNegSlope = -3.15805800 * (10 ** -5)
self.ain1vCenter = 33523.0
self.ain100mvNegSlope = -3.15805800 * (10 ** -6)
self.ain100mvCenter = 33523.0
self.ain10mvNegSlope = -3.15805800 * (10 ** -7)
self.ain10mvCenter = 33523.0
self.ainNegSlope = [ self.ain10vNegSlope, self.ain1vNegSlope, self.ain100mvNegSlope, self.ain10mvNegSlope ]
self.ainCenter = [ self.ain10vCenter, self.ain1vCenter, self.ain100mvCenter, self.ain10mvCenter ]
# Miscellaneous
self.dac0Slope = 13200.0
self.dac0Offset = 0
self.dac1Slope = 13200.0
self.dac1Offset = 0
self.currentOutput0 = 0.0000100000
self.currentOutput1 = 0.0002000000
self.temperatureSlope = -92.379
self.temperatureOffset = 465.129
# Hi-Res ADC stuff
# Positive Channel calibration
self.proAin10vSlope = 3.1580578 * (10 ** -4)
self.proAin10vOffset = -10.5869565220
self.proAin1vSlope = 3.1580578 * (10 ** -5)
self.proAin1vOffset = -1.05869565220
self.proAin100mvSlope = 3.1580578 * (10 ** -6)
self.proAin100mvOffset = -0.105869565220
self.proAin10mvSlope = 3.1580578 * (10 ** -7)
self.proAin10mvOffset = -0.0105869565220
# Negative Channel calibration
self.proAin10vNegSlope = -3.15805800 * (10 ** -4)
self.proAin10vCenter = 33523.0
self.proAin1vNegSlope = -3.15805800 * (10 ** -5)
self.proAin1vCenter = 33523.0
self.proAin100mvNegSlope = -3.15805800 * (10 ** -6)
self.proAin100mvCenter = 33523.0
self.proAin10mvNegSlope = -3.15805800 * (10 ** -7)
self.proAin10mvCenter = 33523.0
def __str__(self):
return str(self.__dict__)
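# Editor's note on the constants above: each slope/offset pair is a nominal
# affine map from a raw 16-bit reading to volts, i.e. roughly
#   volts = ainSlope[gainIndex] * raw + ainOffset[gainIndex]
# (raw 0 -> about -10.59 V, raw 65535 -> about +10.1 V at gain index 0).
# The center/negSlope values serve the negative-channel path instead, and the
# nominal flag records whether device-specific calibration has replaced them.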
class U6(Device):
"""
U6 Class for all U6 specific low-level commands.
Example:
>>> import u6
>>> d = u6.U6()
>>> print d.configU6()
{'SerialNumber': 320032102, ... , 'FirmwareVersion': '1.26'}
"""
def __init__(self, debug = False, autoOpen = True, **kargs):
"""
Name: U6.__init__(self, debug = False, autoOpen = True, **kargs)
Args: debug, Do you want debug information?
autoOpen, If true, then the constructor will call open for you
**kargs, The arguments to be passed to open.
Desc: Your basic constructor.
"""
Device.__init__(self, None, devType = 6)
self.firmwareVersion = 0
self.bootloaderVersion = 0
self.hardwareVersion = 0
self.productId = 0
self.fioDirection = [None] * 8
self.fioState = [None] * 8
self.eioDirection = [None] * 8
self.eioState = [None] * 8
self.cioDirection = [None] * 8
self.cioState = [None] * 8
self.dac1Enable = 0
self.dac0 = 0
self.dac1 = 0
self.calInfo = CalibrationInfo()
self.productName = "U6"
self.debug = debug
if autoOpen:
self.open(**kargs)
def open(self, localId = None, firstFound = True, serial = None, devNumber = None, handleOnly = False, LJSocket = None):
"""
        Name: U6.open(localId = None, firstFound = True, serial = None,
              devNumber = None, handleOnly = False, LJSocket = None)
Args: firstFound, If True, use the first found U6
serial, open a U6 with the given serial number
localId, open a U6 with the given local id.
devNumber, open a U6 with the given devNumber
handleOnly, if True, LabJackPython will only open a handle
LJSocket, set to "<ip>:<port>" to connect to LJSocket
Desc: Opens a U6 for reading and writing.
>>> myU6 = u6.U6(autoOpen = False)
>>> myU6.open()
"""
Device.open(self, 6, firstFound = firstFound, serial = serial, localId = localId, devNumber = devNumber, handleOnly = handleOnly, LJSocket = LJSocket )
def configU6(self, LocalID = None):
"""
Name: U6.configU6(LocalID = None)
Args: LocalID, if set, will write the new value to U6
Desc: Writes the Local ID, and reads some hardware information.
>>> myU6 = u6.U6()
>>> myU6.configU6()
{'BootloaderVersion': '6.15',
'FirmwareVersion': '0.88',
'HardwareVersion': '2.0',
'LocalID': 1,
'ProductID': 6,
'SerialNumber': 360005087,
'VersionInfo': 4}
"""
command = [ 0 ] * 26
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x0A
command[3] = 0x08
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if LocalID != None:
command[6] = (1 << 3)
command[8] = LocalID
#command[7] = Reserved
#command[9-25] = Reserved
try:
result = self._writeRead(command, 38, [0xF8, 0x10, 0x08])
except LabJackException, e:
            if e.errorCode == 4:
                print "NOTE: ConfigU6 returned an error of 4. This probably means you are using a U6 with *really old* firmware. Please upgrade your U6's firmware as soon as possible."
result = self._writeRead(command, 38, [0xF8, 0x10, 0x08], checkBytes = False)
else:
raise e
self.firmwareVersion = "%s.%02d" % (result[10], result[9])
self.bootloaderVersion = "%s.%02d" % (result[12], result[11])
self.hardwareVersion = "%s.%02d" % (result[14], result[13])
self.serialNumber = struct.unpack("<I", struct.pack(">BBBB", *result[15:19]))[0]
self.productId = struct.unpack("<H", struct.pack(">BB", *result[19:21]))[0]
self.localId = result[21]
self.versionInfo = result[37]
self.deviceName = 'U6'
if self.versionInfo == 12:
self.deviceName = 'U6-Pro'
return { 'FirmwareVersion' : self.firmwareVersion, 'BootloaderVersion' : self.bootloaderVersion, 'HardwareVersion' : self.hardwareVersion, 'SerialNumber' : self.serialNumber, 'ProductID' : self.productId, 'LocalID' : self.localId, 'VersionInfo' : self.versionInfo, 'DeviceName' : self.deviceName }
def configIO(self, NumberTimersEnabled = None, EnableCounter1 = None, EnableCounter0 = None, TimerCounterPinOffset = None, EnableUART = None):
"""
Name: U6.configIO(NumberTimersEnabled = None, EnableCounter1 = None, EnableCounter0 = None, TimerCounterPinOffset = None)
Args: NumberTimersEnabled, Number of timers to enable
EnableCounter1, Set to True to enable counter 1, False to disable
EnableCounter0, Set to True to enable counter 0, False to disable
TimerCounterPinOffset, where should the timers/counters start
if all args are None, command just reads.
Desc: Writes and reads the current IO configuration.
>>> myU6 = u6.U6()
>>> myU6.configIO()
{'Counter0Enabled': False,
'Counter1Enabled': False,
'NumberTimersEnabled': 0,
'TimerCounterPinOffset': 0}
"""
command = [ 0 ] * 16
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x05
command[3] = 0x0B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if NumberTimersEnabled != None:
command[6] = 1
command[7] = NumberTimersEnabled
if EnableCounter0 != None:
command[6] = 1
if EnableCounter0:
command[8] = 1
if EnableCounter1 != None:
command[6] = 1
if EnableCounter1:
command[8] |= (1 << 1)
if TimerCounterPinOffset != None:
command[6] = 1
command[9] = TimerCounterPinOffset
if EnableUART is not None:
command[6] |= 1
command[6] |= (1 << 5)
result = self._writeRead(command, 16, [0xf8, 0x05, 0x0B])
return { 'NumberTimersEnabled' : result[8], 'Counter0Enabled' : bool(result[9] & 1), 'Counter1Enabled' : bool( (result[9] >> 1) & 1), 'TimerCounterPinOffset' : result[10] }
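# A usage sketch (assumption: a U6 is attached and opened as in the class
# docstring; the keyword names mirror configIO() above):
#
#   d = U6()
#   ioconfig = d.configIO(NumberTimersEnabled = 2, EnableCounter0 = True,
#                         TimerCounterPinOffset = 4)
#   print ioconfig['NumberTimersEnabled']   # -> 2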
def configTimerClock(self, TimerClockBase = None, TimerClockDivisor = None):
"""
Name: U6.configTimerClock(TimerClockBase = None, TimerClockDivisor = None)
Args: TimerClockBase, which timer base to use
TimerClockDivisor, set the divisor
if all args are None, command just reads.
Note: you cannot set the divisor without setting the base.
Desc: Writes and read the timer clock configuration.
>>> myU6 = u6.U6()
>>> myU6.configTimerClock()
{'TimerClockDivisor': 256, 'TimerClockBase': 2}
"""
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x0A
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#command[6] = Reserved
#command[7] = Reserved
if TimerClockBase != None:
command[8] = (1 << 7)
command[8] |= TimerClockBase & 7
if TimerClockDivisor != None:
command[9] = TimerClockDivisor
result = self._writeRead(command, 10, [0xF8, 0x2, 0x0A])
divisor = result[9]
if divisor == 0:
divisor = 256
return { 'TimerClockBase' : (result[8] & 7), 'TimerClockDivisor' : divisor }
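# Worked example of the fields read back above (assumption: the selected
# TimerClockBase corresponds to a 48 MHz source, per the U6 documentation):
#
#   cfg = d.configTimerClock(TimerClockBase = 2, TimerClockDivisor = 256)
#   effective_clock = 48000000.0 / cfg['TimerClockDivisor']   # 187500.0 Hz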
def _buildBuffer(self, sendBuffer, readLen, commandlist):
for cmd in commandlist:
if isinstance(cmd, FeedbackCommand):
sendBuffer += cmd.cmdBytes
readLen += cmd.readLen
elif isinstance(cmd, list):
sendBuffer, readLen = self._buildBuffer(sendBuffer, readLen, cmd)
return (sendBuffer, readLen)
def _buildFeedbackResults(self, rcvBuffer, commandlist, results, i):
for cmd in commandlist:
if isinstance(cmd, FeedbackCommand):
results.append(cmd.handle(rcvBuffer[i:i+cmd.readLen]))
i += cmd.readLen
elif isinstance(cmd, list):
self._buildFeedbackResults(rcvBuffer, cmd, results, i)
return results
def getFeedback(self, *commandlist):
"""
Name: getFeedback(commandlist)
Args: the FeedbackCommands to run
Desc: Forms the commandlist into a packet, sends it to the U6, and reads the response.
>>> myU6 = U6()
>>> ledCommand = u6.LED(False)
>>> internalTempCommand = u6.AIN(30, 31, True)
>>> myU6.getFeedback(ledCommand, internalTempCommand)
[None, 23200]
OR if you like the list version better:
>>> myU6 = U6()
>>> ledCommand = u6.LED(False)
>>> internalTempCommand = u6.AIN(30, 31, True)
>>> commandList = [ ledCommand, internalTempCommand ]
>>> myU6.getFeedback(commandList)
[None, 23200]
"""
sendBuffer = [0] * 7
sendBuffer[1] = 0xF8
readLen = 9
sendBuffer, readLen = self._buildBuffer(sendBuffer, readLen, commandlist)
if len(sendBuffer) % 2:
sendBuffer += [0]
sendBuffer[2] = len(sendBuffer) / 2 - 3
if readLen % 2:
readLen += 1
if len(sendBuffer) > MAX_USB_PACKET_LENGTH:
raise LabJackException("ERROR: The feedback command you are attempting to send is bigger than 64 bytes ( %s bytes ). Break your commands up into separate calls to getFeedback()." % len(sendBuffer))
if readLen > MAX_USB_PACKET_LENGTH:
raise LabJackException("ERROR: The feedback command you are attempting to send would yield a response that is greater than 64 bytes ( %s bytes ). Break your commands up into separate calls to getFeedback()." % readLen)
rcvBuffer = self._writeRead(sendBuffer, readLen, [], checkBytes = False, stream = False, checksum = True)
# Check the response for errors
try:
self._checkCommandBytes(rcvBuffer, [0xF8])
if rcvBuffer[3] != 0x00:
raise LabJackException("Got incorrect command bytes")
except LowlevelErrorException, e:
if isinstance(commandlist[0], list):
culprit = commandlist[0][ (rcvBuffer[7] -1) ]
else:
culprit = commandlist[ (rcvBuffer[7] -1) ]
raise LowlevelErrorException("\nThis Command\n %s\nreturned an error:\n %s" % ( culprit, lowlevelErrorToString(rcvBuffer[6]) ) )
results = []
i = 9
return self._buildFeedbackResults(rcvBuffer, commandlist, results, i)
def readMem(self, BlockNum, ReadCal=False):
"""
Name: U6.readMem(BlockNum, ReadCal=False)
Args: BlockNum, which block to read
ReadCal, set to True to read the calibration data
Desc: Reads 1 block (32 bytes) from the non-volatile user or
calibration memory. Please read section 5.2.6 of the user's
guide before you do something you may regret.
>>> myU6 = U6()
>>> myU6.readMem(0)
[ < userdata stored in block 0 > ]
NOTE: Do not call this function while streaming.
"""
command = [ 0 ] * 8
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x01
command[3] = 0x2A
if ReadCal:
command[3] = 0x2D
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x00
command[7] = BlockNum
result = self._writeRead(command, 40, [ 0xF8, 0x11, command[3] ])
return result[8:]
def readCal(self, BlockNum):
return self.readMem(BlockNum, ReadCal = True)
def writeMem(self, BlockNum, Data, WriteCal=False):
"""
Name: U6.writeMem(BlockNum, Data, WriteCal=False)
Args: BlockNum, which block to write
Data, a list of bytes to write
WriteCal, set to True to write calibration.
Desc: Writes 1 block (32 bytes) to the non-volatile user or
calibration memory. Please read section 5.2.7 of the user's
guide before you do something you may regret.
>>> myU6 = U6()
>>> myU6.writeMem(0, [ < userdata to be stored in block 0 > ])
NOTE: Do not call this function while streaming.
"""
if not isinstance(Data, list):
raise LabJackException("Data must be a list of bytes")
command = [ 0 ] * 40
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x11
command[3] = 0x28
if WriteCal:
command[3] = 0x2B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x00
command[7] = BlockNum
command[8:] = Data
self._writeRead(command, 8, [0xF8, 0x11, command[3]])
def writeCal(self, BlockNum, Data):
return self.writeMem(BlockNum, Data, WriteCal = True)
def eraseMem(self, EraseCal=False):
"""
Name: U6.eraseMem(EraseCal=False)
Args: EraseCal, set to True to erase the calibration memory.
Desc: The U6 uses flash memory that must be erased before writing.
Please read section 5.2.8 of the user's guide before you do
something you may regret.
>>> myU6 = U6()
>>> myU6.eraseMem()
NOTE: Do not call this function while streaming.
"""
if EraseCal:
command = [ 0 ] * 8
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x01
command[3] = 0x2C
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x4C
command[7] = 0x6C
else:
command = [ 0 ] * 6
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x00
command[3] = 0x29
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
self._writeRead(command, 8, [0xF8, 0x01, command[3]])
def eraseCal(self):
return self.eraseMem(EraseCal=True)
def streamConfig(self, NumChannels = 1, ResolutionIndex = 0, SamplesPerPacket = 25, SettlingFactor = 0, InternalStreamClockFrequency = 0, DivideClockBy256 = False, ScanInterval = 1, ChannelNumbers = [0], ChannelOptions = [0], SampleFrequency = None):
"""
Name: U6.streamConfig(
NumChannels = 1, ResolutionIndex = 0,
SamplesPerPacket = 25, SettlingFactor = 0,
InternalStreamClockFrequency = 0, DivideClockBy256 = False,
ScanInterval = 1, ChannelNumbers = [0],
ChannelOptions = [0], SampleFrequency = None )
Args: NumChannels, the number of channels to stream
ResolutionIndex, the resolution of the samples
SettlingFactor, the settling factor to be used
ChannelNumbers, a list of channel numbers to stream
ChannelOptions, a list of channel options bytes
Set Either:
SampleFrequency, the frequency in Hz to sample
-- OR --
SamplesPerPacket, how many samples make one packet
InternalStreamClockFrequency, 0 = 4 MHz, 1 = 48 MHz
DivideClockBy256, True = divide the clock by 256
ScanInterval, clock/ScanInterval = frequency.
Desc: Configures streaming on the U6. On a decent machine, you can
expect to stream a range of 0.238 Hz to 15 Hz. Without the
conversion, you can get up to 55 Hz.
"""
if NumChannels != len(ChannelNumbers) or NumChannels != len(ChannelOptions):
raise LabJackException("NumChannels must match the length of ChannelNumbers and ChannelOptions")
if SampleFrequency != None:
if SampleFrequency < 1000:
if SampleFrequency < 25:
SamplesPerPacket = SampleFrequency
DivideClockBy256 = True
ScanInterval = 15625/SampleFrequency
else:
DivideClockBy256 = False
ScanInterval = 4000000/SampleFrequency
# Force Scan Interval into correct range
ScanInterval = min( ScanInterval, 65535 )
ScanInterval = int( ScanInterval )
ScanInterval = max( ScanInterval, 1 )
# Same with Samples per packet
SamplesPerPacket = max( SamplesPerPacket, 1)
SamplesPerPacket = int( SamplesPerPacket )
SamplesPerPacket = min ( SamplesPerPacket, 25)
command = [ 0 ] * (14 + NumChannels*2)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = NumChannels+4
command[3] = 0x11
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = NumChannels
command[7] = ResolutionIndex
command[8] = SamplesPerPacket
#command[9] = Reserved
command[10] = SettlingFactor
command[11] = (InternalStreamClockFrequency & 1) << 3
if DivideClockBy256:
command[11] |= 1 << 1
t = struct.pack("<H", ScanInterval)
command[12] = ord(t[0])
command[13] = ord(t[1])
for i in range(NumChannels):
command[14+(i*2)] = ChannelNumbers[i]
command[15+(i*2)] = ChannelOptions[i]
self._writeRead(command, 8, [0xF8, 0x01, 0x11])
# Set up the variables for future use.
self.streamSamplesPerPacket = SamplesPerPacket
self.streamChannelNumbers = ChannelNumbers
self.streamChannelOptions = ChannelOptions
self.streamConfiged = True
if InternalStreamClockFrequency == 1:
freq = float(48000000)
else:
freq = float(4000000)
if DivideClockBy256:
freq /= 256
freq = freq/ScanInterval
self.packetsPerRequest = max(1, int(freq/SamplesPerPacket))
self.packetsPerRequest = min(self.packetsPerRequest, 48)
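# A usage sketch for the SampleFrequency branch above (assumes a U6 is
# attached): two channels at 5 kHz, with gain index 0 in the options bytes.
#
#   d.streamConfig(NumChannels = 2, ChannelNumbers = [0, 1],
#                  ChannelOptions = [0, 0], SampleFrequency = 5000)
#   # 5000 >= 25, so ScanInterval = 4000000/5000 = 800 with the 4 MHz clock.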
def processStreamData(self, result, numBytes = None):
"""
Name: U6.processStreamData(result, numBytes = None)
Args: result, the string returned from streamData()
numBytes, the number of bytes per packet
Desc: Breaks stream data into individual channels and applies
calibrations.
>>> reading = d.streamData(convert = False)
>>> print d.processStreamData(reading['result'])
defaultdict(list, {'AIN0' : [3.123, 3.231, 3.232, ...]})
"""
if numBytes is None:
numBytes = 14 + (self.streamSamplesPerPacket * 2)
returnDict = collections.defaultdict(list)
j = self.streamPacketOffset
for packet in self.breakupPackets(result, numBytes):
for sample in self.samplesFromPacket(packet):
if j >= len(self.streamChannelNumbers):
j = 0
if self.streamChannelNumbers[j] in (193, 194):
value = struct.unpack('<BB', sample )
elif self.streamChannelNumbers[j] >= 200:
value = struct.unpack('<H', sample )[0]
else:
# Both differential (bit 7 of the channel options) and single-ended
# samples arrive as unsigned 16-bit values; the sign is handled by
# the calibration center point in binaryToCalibratedAnalogVoltage().
value = struct.unpack('<H', sample )[0]
gainIndex = (self.streamChannelOptions[j] >> 4) & 0x3
value = self.binaryToCalibratedAnalogVoltage(gainIndex, value, is16Bits=True)
returnDict["AIN%s" % self.streamChannelNumbers[j]].append(value)
j += 1
self.streamPacketOffset = j
return returnDict
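# A typical stream loop pairing streamConfig() with processStreamData().
# Sketch only: streamStart(), streamData() and streamStop() are assumed to
# be provided by the Device base class, as the docstring above implies.
#
#   d.streamStart()
#   for pkt in d.streamData(convert = False):
#       if pkt is not None:
#           ain0 = d.processStreamData(pkt['result'])['AIN0']
#   d.streamStop()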
def watchdog(self, Write = False, ResetOnTimeout = False, SetDIOStateOnTimeout = False, TimeoutPeriod = 60, DIOState = 0, DIONumber = 0):
"""
Name: U6.watchdog(Write = False, ResetOnTimeout = False, SetDIOStateOnTimeout = False, TimeoutPeriod = 60, DIOState = 0, DIONumber = 0)
Args: Write, Set to True to write new values to the watchdog.
ResetOnTimeout, True means reset the device on timeout
SetDIOStateOnTimeout, True means set the state of a DIO on timeout
TimeoutPeriod, Time, in seconds, to wait before timing out.
DIOState, 1 = High, 0 = Low
DIONumber, which DIO to set.
Desc: Controls a firmware based watchdog timer.
"""
command = [ 0 ] * 16
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x05
command[3] = 0x09
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if Write:
command[6] = 1
if ResetOnTimeout:
command[7] = (1 << 5)
if SetDIOStateOnTimeout:
command[7] |= (1 << 4)
t = struct.pack("<H", TimeoutPeriod)
command[8] = ord(t[0])
command[9] = ord(t[1])
command[10] = ((DIOState & 1 ) << 7)
command[10] |= (DIONumber & 0xf)
result = self._writeRead(command, 16, [ 0xF8, 0x05, 0x09])
watchdogStatus = {}
if result[7] == 0:
watchdogStatus['WatchDogEnabled'] = False
watchdogStatus['ResetOnTimeout'] = False
watchdogStatus['SetDIOStateOnTimeout'] = False
else:
watchdogStatus['WatchDogEnabled'] = True
if (( result[7] >> 5 ) & 1):
watchdogStatus['ResetOnTimeout'] = True
else:
watchdogStatus['ResetOnTimeout'] = False
if (( result[7] >> 4 ) & 1):
watchdogStatus['SetDIOStateOnTimeout'] = True
else:
watchdogStatus['SetDIOStateOnTimeout'] = False
watchdogStatus['TimeoutPeriod'] = struct.unpack('<H', struct.pack("BB", *result[8:10]))[0]
if (( result[10] >> 7 ) & 1):
watchdogStatus['DIOState'] = 1
else:
watchdogStatus['DIOState'] = 0
watchdogStatus['DIONumber'] = ( result[10] & 15 )
return watchdogStatus
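# A usage sketch (assumes a U6 is attached): arm the firmware watchdog to
# reset the device after 60 s without communication, then read it back.
#
#   d.watchdog(Write = True, ResetOnTimeout = True, TimeoutPeriod = 60)
#   status = d.watchdog()   # a read-only call returns the stored settings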
SPIModes = { 'A' : 0, 'B' : 1, 'C' : 2, 'D' : 3 }
def spi(self, SPIBytes, AutoCS=True, DisableDirConfig = False, SPIMode = 'A', SPIClockFactor = 0, CSPINNum = 0, CLKPinNum = 1, MISOPinNum = 2, MOSIPinNum = 3):
"""
Name: U6.spi(SPIBytes, AutoCS=True, DisableDirConfig = False,
SPIMode = 'A', SPIClockFactor = 0, CSPINNum = 0,
CLKPinNum = 1, MISOPinNum = 2, MOSIPinNum = 3)
Args: SPIBytes, A list of bytes to send.
AutoCS, If True, the CS line is automatically driven low
during the SPI communication and brought back high
when done.
DisableDirConfig, If True, function does not set the direction
of the line.
SPIMode, 'A', 'B', 'C', or 'D'.
SPIClockFactor, Sets the frequency of the SPI clock.
CSPINNum, which pin is CS
CLKPinNum, which pin is CLK
MISOPinNum, which pin is MISO
MOSIPinNum, which pin is MOSI
Desc: Sends and receives serial data using SPI synchronous
communication. See Section 5.2.17 of the user's guide.
"""
if not isinstance(SPIBytes, list):
raise LabJackException("SPIBytes MUST be a list of bytes")
numSPIBytes = len(SPIBytes)
oddPacket = False
if numSPIBytes%2 != 0:
SPIBytes.append(0)
numSPIBytes = numSPIBytes + 1
oddPacket = True
command = [ 0 ] * (13 + numSPIBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 4 + (numSPIBytes/2)
command[3] = 0x3A
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if AutoCS:
command[6] |= (1 << 7)
if DisableDirConfig:
command[6] |= (1 << 6)
command[6] |= ( self.SPIModes[SPIMode] & 3 )
command[7] = SPIClockFactor
#command[8] = Reserved
command[9] = CSPINNum
command[10] = CLKPinNum
command[11] = MISOPinNum
command[12] = MOSIPinNum
command[13] = numSPIBytes
if oddPacket:
command[13] = numSPIBytes - 1
command[14:] = SPIBytes
result = self._writeRead(command, 8+numSPIBytes, [ 0xF8, 1+(numSPIBytes/2), 0x3A ])
return { 'NumSPIBytesTransferred' : result[7], 'SPIBytes' : result[8:] }
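# A usage sketch of a 4-byte transfer on the default pin mapping above
# (CS = FIO0, CLK = FIO1, MISO = FIO2, MOSI = FIO3):
#
#   resp = d.spi([0x01, 0x02, 0x03, 0x04], SPIMode = 'A')
#   print resp['SPIBytes']   # bytes clocked in on MISO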
def asynchConfig(self, Update = True, UARTEnable = True, DesiredBaud = None, BaudFactor = 63036):
"""
Name: U6.asynchConfig(Update = True, UARTEnable = True,
DesiredBaud = None, BaudFactor = 63036)
Args: Update, If True, new values are written.
UARTEnable, If True, UART will be enabled.
DesiredBaud, If set, will apply the formula below to
calculate BaudFactor.
BaudFactor, = 2^16 - 48000000/(2 * Desired Baud). Ignored
if DesiredBaud is set.
Desc: Configures the U6 UART for asynchronous communication. See
section 5.2.18 of the User's Guide.
"""
if UARTEnable:
self.configIO(EnableUART = True)
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x14
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#commmand[6] = 0x00
if Update:
command[7] = (1 << 7)
if UARTEnable:
command[7] |= (1 << 6)
if DesiredBaud != None:
BaudFactor = (2**16) - 48000000/(2 * DesiredBaud)
t = struct.pack("<H", BaudFactor)
command[8] = ord(t[0])
command[9] = ord(t[1])
results = self._writeRead(command, 10, [0xF8, 0x02, 0x14])
if command[8] != results[8] or command[9] != results[9]:
raise LabJackException("BaudFactor didn't stick.")
def asynchTX(self, AsynchBytes):
"""
Name: U6.asynchTX(AsynchBytes)
Args: AsynchBytes, List of bytes to send
Desc: Sends bytes to the U6 UART which will be sent asynchronously
on the transmit line. Section 5.2.19 of the User's Guide.
"""
numBytes = len(AsynchBytes)
oddPacket = False
if numBytes%2 != 0:
oddPacket = True
AsynchBytes.append(0)
numBytes = numBytes + 1
command = [ 0 ] * (8+numBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 1 + (numBytes/2)
command[3] = 0x15
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#commmand[6] = 0x00
command[7] = numBytes
if oddPacket:
command[7] = numBytes-1
command[8:] = AsynchBytes
result = self._writeRead(command, 10, [ 0xF8, 0x02, 0x15])
return { 'NumAsynchBytesSent' : result[7], 'NumAsynchBytesInRXBuffer' : result[8] }
def asynchRX(self, Flush = False):
"""
Name: U6.asynchRX(Flush = False)
Args: Flush, If True, empties the entire 256-byte RX buffer.
Desc: Reads back bytes that the U6 UART has received asynchronously.
Section 5.2.20 of the User's Guide.
"""
command = [ 0, 0xF8, 0x01, 0x16, 0, 0, 0, int(Flush)]
result = self._writeRead(command, 40, [ 0xF8, 0x11, 0x16 ])
return { 'NumAsynchBytesInRXBuffer' : result[7], 'AsynchBytes' : result[8:] }
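# A UART round-trip sketch using the three asynch methods above (assumes
# the TX line is looped back to RX externally):
#
#   d.asynchConfig(DesiredBaud = 9600)
#   d.asynchTX([0x55, 0xAA])
#   rx = d.asynchRX()
#   print rx['NumAsynchBytesInRXBuffer'], rx['AsynchBytes'][:2]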
def i2c(self, Address, I2CBytes, EnableClockStretching = False, NoStopWhenRestarting = False, ResetAtStart = False, SpeedAdjust = 0, SDAPinNum = 0, SCLPinNum = 1, NumI2CBytesToReceive = 0, AddressByte = None):
"""
Name: U6.i2c(Address, I2CBytes, EnableClockStretching = False, NoStopWhenRestarting = False, ResetAtStart = False, SpeedAdjust = 0, SDAPinNum = 0, SCLPinNum = 1, NumI2CBytesToReceive = 0, AddressByte = None)
Args: Address, the address (Not shifted over)
I2CBytes, a list of bytes to send
EnableClockStretching, True enables clock stretching
NoStopWhenRestarting, True means no stop sent when restarting
ResetAtStart, if True, an I2C bus reset will be done
before communicating.
SpeedAdjust, Allows the communication frequency to be reduced.
SDAPinNum, Which pin will be data
SCLPinNum, Which pin is clock
NumI2CBytesToReceive, Number of I2C bytes to expect back.
AddressByte, The address as you would put it in the lowlevel
packet. Overrides Address. Optional.
Desc: Sends and receives serial data using I2C synchronous
communication. Section 5.2.21 of the User's Guide.
"""
numBytes = len(I2CBytes)
oddPacket = False
if numBytes%2 != 0:
oddPacket = True
I2CBytes.append(0)
numBytes = numBytes+1
command = [ 0 ] * (14+numBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 4 + (numBytes/2)
command[3] = 0x3B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if EnableClockStretching:
command[6] |= (1 << 3)
if NoStopWhenRestarting:
command[6] |= (1 << 2)
if ResetAtStart:
command[6] |= (1 << 1)
command[7] = SpeedAdjust
command[8] = SDAPinNum
command[9] = SCLPinNum
if AddressByte != None:
command[10] = AddressByte
else:
command[10] = Address << 1
#command[11] = Reserved
command[12] = numBytes
if oddPacket:
command[12] = numBytes-1
command[13] = NumI2CBytesToReceive
command[14:] = I2CBytes
oddResponse = False
if NumI2CBytesToReceive%2 != 0:
NumI2CBytesToReceive = NumI2CBytesToReceive+1
oddResponse = True
result = self._writeRead(command, (12+NumI2CBytesToReceive), [0xF8, (3+(NumI2CBytesToReceive/2)), 0x3B])
if NumI2CBytesToReceive != 0:
return { 'AckArray' : result[8:12], 'I2CBytes' : result[12:] }
else:
return { 'AckArray' : result[8:12] }
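# A usage sketch against a hypothetical I2C device at 7-bit address 0x50
# (a common EEPROM address), data on FIO0 and clock on FIO1. The 7-bit
# address is shifted left internally, per the Address handling above.
#
#   resp = d.i2c(0x50, [0x00], NumI2CBytesToReceive = 2)
#   print resp['AckArray'], resp['I2CBytes']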
def sht1x(self, DataPinNum = 0, ClockPinNum = 1, SHTOptions = 0xc0):
"""
Name: U6.sht1x(DataPinNum = 0, ClockPinNum = 1, SHTOptions = 0xc0)
Args: DataPinNum, Which pin is the Data line
ClockPinNum, Which line is the Clock line
SHTOptions (and proof people read documentation):
bit 7 = Read Temperature
bit 6 = Read Relative Humidity
bit 2 = Heater. 1 = on, 0 = off
bit 1 = Reserved at 0
bit 0 = Resolution. 1 = 8 bit RH, 12 bit T; 0 = 12 bit RH, 14 bit T
Desc: Reads temperature and humidity from a Sensirion SHT1X sensor.
Section 5.2.22 of the User's Guide.
"""
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x39
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = DataPinNum
command[7] = ClockPinNum
#command[8] = Reserved
command[9] = SHTOptions
result = self._writeRead(command, 16, [ 0xF8, 0x05, 0x39])
val = (result[11]*256) + result[10]
temp = -39.60 + 0.01*val
val = (result[14]*256) + result[13]
humid = -4 + 0.0405*val + -.0000028*(val*val)
humid = (temp - 25)*(0.01 + 0.00008*val) + humid
return { 'StatusReg' : result[8], 'StatusCRC' : result[9], 'Temperature' : temp, 'TemperatureCRC' : result[12], 'Humidity' : humid, 'HumidityCRC' : result[15] }
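# A usage sketch (assumes an SHT1X wired to FIO0/FIO1): the default
# SHTOptions of 0xc0 sets bits 7 and 6, reading both values at once.
#
#   reading = d.sht1x(DataPinNum = 0, ClockPinNum = 1)
#   print reading['Temperature'], reading['Humidity']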
# --------------------------- Old U6 code -------------------------------
def _readCalDataBlock(self, n):
"""
Internal routine to read the specified calibration block (0-9)
"""
sendBuffer = [0] * 8
sendBuffer[1] = 0xF8 # command byte
sendBuffer[2] = 0x01 # number of data words
sendBuffer[3] = 0x2D # extended command number
sendBuffer[6] = 0x00
sendBuffer[7] = n # block number to read
self.write(sendBuffer)
buff = self.read(40)
return buff[8:]
def getCalibrationData(self):
"""
Name: getCalibrationData(self)
Args: None
Desc: Gets the slopes and offsets for AIN and DACs,
as well as other calibration data
>>> myU6 = U6()
>>> myU6.getCalibrationData()
>>> myU6.calInfo
<ainDiffOffset: -2.46886488446,...>
"""
if self.debug is True:
print "Calibration data retrieval"
self.calInfo.nominal = False
#reading block 0 from memory
rcvBuffer = self._readCalDataBlock(0)
# Positive Channel calibration
self.calInfo.ain10vSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain10vOffset = toDouble(rcvBuffer[8:16])
self.calInfo.ain1vSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain1vOffset = toDouble(rcvBuffer[24:])
#reading block 1 from memory
rcvBuffer = self._readCalDataBlock(1)
self.calInfo.ain100mvSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain100mvOffset = toDouble(rcvBuffer[8:16])
self.calInfo.ain10mvSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain10mvOffset = toDouble(rcvBuffer[24:])
self.calInfo.ainSlope = [self.calInfo.ain10vSlope, self.calInfo.ain1vSlope, self.calInfo.ain100mvSlope, self.calInfo.ain10mvSlope]
self.calInfo.ainOffset = [ self.calInfo.ain10vOffset, self.calInfo.ain1vOffset, self.calInfo.ain100mvOffset, self.calInfo.ain10mvOffset ]
#reading block 2 from memory
rcvBuffer = self._readCalDataBlock(2)
# Negative Channel calibration
self.calInfo.ain10vNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain10vCenter = toDouble(rcvBuffer[8:16])
self.calInfo.ain1vNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain1vCenter = toDouble(rcvBuffer[24:])
#reading block 3 from memory
rcvBuffer = self._readCalDataBlock(3)
self.calInfo.ain100mvNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain100mvCenter = toDouble(rcvBuffer[8:16])
self.calInfo.ain10mvNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain10mvCenter = toDouble(rcvBuffer[24:])
self.calInfo.ainNegSlope = [ self.calInfo.ain10vNegSlope, self.calInfo.ain1vNegSlope, self.calInfo.ain100mvNegSlope, self.calInfo.ain10mvNegSlope ]
self.calInfo.ainCenter = [ self.calInfo.ain10vCenter, self.calInfo.ain1vCenter, self.calInfo.ain100mvCenter, self.calInfo.ain10mvCenter ]
#reading block 4 from memory
rcvBuffer = self._readCalDataBlock(4)
# Miscellaneous
self.calInfo.dac0Slope = toDouble(rcvBuffer[:8])
self.calInfo.dac0Offset = toDouble(rcvBuffer[8:16])
self.calInfo.dac1Slope = toDouble(rcvBuffer[16:24])
self.calInfo.dac1Offset = toDouble(rcvBuffer[24:])
#reading block 5 from memory
rcvBuffer = self._readCalDataBlock(5)
self.calInfo.currentOutput0 = toDouble(rcvBuffer[:8])
self.calInfo.currentOutput1 = toDouble(rcvBuffer[8:16])
self.calInfo.temperatureSlope = toDouble(rcvBuffer[16:24])
self.calInfo.temperatureOffset = toDouble(rcvBuffer[24:])
if self.productName == "U6-Pro":
# Hi-Res ADC stuff
#reading block 6 from memory
rcvBuffer = self._readCalDataBlock(6)
# Positive Channel calibration
self.calInfo.proAin10vSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin10vOffset = toDouble(rcvBuffer[8:16])
self.calInfo.proAin1vSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin1vOffset = toDouble(rcvBuffer[24:])
#reading block 7 from memory
rcvBuffer = self._readCalDataBlock(7)
self.calInfo.proAin100mvSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin100mvOffset = toDouble(rcvBuffer[8:16])
self.calInfo.proAin10mvSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin10mvOffset = toDouble(rcvBuffer[24:])
self.calInfo.proAinSlope = [self.calInfo.proAin10vSlope, self.calInfo.proAin1vSlope, self.calInfo.proAin100mvSlope, self.calInfo.proAin10mvSlope]
self.calInfo.proAinOffset = [ self.calInfo.proAin10vOffset, self.calInfo.proAin1vOffset, self.calInfo.proAin100mvOffset, self.calInfo.proAin10mvOffset ]
#reading block 8 from memory
rcvBuffer = self._readCalDataBlock(8)
# Negative Channel calibration
self.calInfo.proAin10vNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin10vCenter = toDouble(rcvBuffer[8:16])
self.calInfo.proAin1vNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin1vCenter = toDouble(rcvBuffer[24:])
#reading block 9 from memory
rcvBuffer = self._readCalDataBlock(9)
self.calInfo.proAin100mvNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin100mvCenter = toDouble(rcvBuffer[8:16])
self.calInfo.proAin10mvNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin10mvCenter = toDouble(rcvBuffer[24:])
self.calInfo.proAinNegSlope = [ self.calInfo.proAin10vNegSlope, self.calInfo.proAin1vNegSlope, self.calInfo.proAin100mvNegSlope, self.calInfo.proAin10mvNegSlope ]
self.calInfo.proAinCenter = [ self.calInfo.proAin10vCenter, self.calInfo.proAin1vCenter, self.calInfo.proAin100mvCenter, self.calInfo.proAin10mvCenter ]
def binaryToCalibratedAnalogVoltage(self, gainIndex, bytesVoltage, is16Bits=False):
"""
Name: binaryToCalibratedAnalogVoltage(gainIndex, bytesVoltage, is16Bits = False)
Args: gainIndex, which gain did you use?
bytesVoltage, bytes returned from the U6
is16Bits, set to True if bytesVoltage is 16 bits (not 24)
Desc: Converts binary voltage to an analog value.
"""
if not is16Bits:
bits = float(bytesVoltage)/256
else:
bits = float(bytesVoltage)
center = self.calInfo.ainCenter[gainIndex]
negSlope = self.calInfo.ainNegSlope[gainIndex]
posSlope = self.calInfo.ainSlope[gainIndex]
if self.productName == "U6-Pro":
center = self.calInfo.proAinCenter[gainIndex]
negSlope = self.calInfo.proAinNegSlope[gainIndex]
posSlope = self.calInfo.proAinSlope[gainIndex]
if bits < center:
return (center - bits) * negSlope
else:
return (bits - center) * posSlope
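# Worked example with the nominal high-res constants near the top of
# CalibrationInfo (slope ~3.1580578e-4 V/bit, center ~33523 for gain
# index 0): a 16-bit reading of 43523 sits 10000 counts above center and
# converts to roughly (43523 - 33523) * 3.1580578e-4 = 3.158 V, while a
# reading of 23523 lands below center and uses the negative slope instead.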
def binaryToCalibratedAnalogTemperature(self, bytesTemperature):
voltage = self.binaryToCalibratedAnalogVoltage(0, bytesTemperature)
return self.calInfo.temperatureSlope * float(voltage) + self.calInfo.temperatureOffset
def softReset(self):
"""
Name: softReset
Args: none
Desc: Send a soft reset.
>>> myU6 = U6()
>>> myU6.softReset()
"""
command = [ 0x00, 0x99, 0x01, 0x00 ]
command = setChecksum8(command, 4)
self.write(command, False, False)
results = self.read(4)
if results[3] != 0:
raise LowlevelErrorException(results[3], "The softReset command returned an error:\n %s" % lowlevelErrorToString(results[3]))
def hardReset(self):
"""
Name: hardReset
Args: none
Desc: Send a hard reset.
>>> myU6 = U6()
>>> myU6.hardReset()
"""
command = [ 0x00, 0x99, 0x02, 0x00 ]
command = setChecksum8(command, 4)
self.write(command, False, False)
results = self.read(4)
if results[3] != 0:
raise LowlevelErrorException(results[3], "The hardReset command returned an error:\n %s" % lowlevelErrorToString(results[3]))
self.close()
def setLED(self, state):
"""
Name: setLED(self, state)
Args: state: 1 = On, 0 = Off
Desc: Sets the state of the LED. (5.2.5.4 of user's guide)
>>> myU6 = U6()
>>> myU6.setLED(0)
... (LED turns off) ...
"""
self.getFeedback(LED(state))
def getTemperature(self):
"""
Name: getTemperature
Args: none
Desc: Reads the U6's internal temperature sensor in Kelvin.
See Section 2.6.4 of the U6 User's Guide.
>>> myU6.getTemperature()
299.87723471224308
"""
if self.calInfo.nominal:
# Read the actual calibration constants if we haven't already.
self.getCalibrationData()
result = self.getFeedback(AIN24AR(14))
return self.binaryToCalibratedAnalogTemperature(result[0]['AIN'])
def getAIN(self, positiveChannel, resolutionIndex = 0, gainIndex = 0, settlingFactor = 0, differential = False):
"""
Name: getAIN
Args: positiveChannel, resolutionIndex = 0, gainIndex = 0, settlingFactor = 0, differential = False
Desc: Reads an AIN and applies the calibration constants to it.
>>> myU6.getAIN(14)
299.87723471224308
"""
result = self.getFeedback(AIN24AR(positiveChannel, resolutionIndex, gainIndex, settlingFactor, differential))
return self.binaryToCalibratedAnalogVoltage(result[0]['GainIndex'], result[0]['AIN'])
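# A usage sketch of a differential reading between AIN2 (positive) and
# AIN3 (the negative channel is positiveChannel + 1, per AIN24 below),
# at gain index 1 (x10) and a high-speed resolution index:
#
#   volts = d.getAIN(2, resolutionIndex = 1, gainIndex = 1, differential = True)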
def readDefaultsConfig(self):
"""
Name: U6.readDefaultsConfig( )
Args: None
Desc: Reads the power-up defaults stored in flash.
"""
results = dict()
defaults = self.readDefaults(0)
results['FIODirection'] = defaults[4]
results['FIOState'] = defaults[5]
results['EIODirection'] = defaults[8]
results['EIOState'] = defaults[9]
results['CIODirection'] = defaults[12]
results['CIOState'] = defaults[13]
results['ConfigWriteMask'] = defaults[16]
results['NumOfTimersEnable'] = defaults[17]
results['CounterMask'] = defaults[18]
results['PinOffset'] = defaults[19]
defaults = self.readDefaults(1)
results['ClockSource'] = defaults[0]
results['Divisor'] = defaults[1]
results['TMR0Mode'] = defaults[16]
results['TMR0ValueL'] = defaults[17]
results['TMR0ValueH'] = defaults[18]
results['TMR1Mode'] = defaults[20]
results['TMR1ValueL'] = defaults[21]
results['TMR1ValueH'] = defaults[22]
results['TMR2Mode'] = defaults[24]
results['TMR2ValueL'] = defaults[25]
results['TMR2ValueH'] = defaults[26]
results['TMR3Mode'] = defaults[28]
results['TMR3ValueL'] = defaults[29]
results['TMR3ValueH'] = defaults[30]
defaults = self.readDefaults(2)
results['DAC0'] = struct.unpack( ">H", struct.pack("BB", *defaults[16:18]) )[0]
results['DAC1'] = struct.unpack( ">H", struct.pack("BB", *defaults[20:22]) )[0]
defaults = self.readDefaults(3)
for i in range(14):
results["AIN%sGainRes" % i] = defaults[i]
results["AIN%sOptions" % i] = defaults[i+16]
return results
def exportConfig(self):
"""
Name: U6.exportConfig( )
Args: None
Desc: Takes a configuration and puts it into a ConfigParser object.
"""
# Make a new configuration file
parser = ConfigParser.SafeConfigParser()
# Change optionxform so that options preserve their case.
parser.optionxform = str
# Local Id and name
section = "Identifiers"
parser.add_section(section)
parser.set(section, "Local ID", str(self.localId))
parser.set(section, "Name", str(self.getName()))
parser.set(section, "Device Type", str(self.devType))
# FIO Direction / State
section = "FIOs"
parser.add_section(section)
dirs, states = self.getFeedback( PortDirRead(), PortStateRead() )
for key, value in dirs.items():
parser.set(section, "%s Directions" % key, str(value))
for key, value in states.items():
parser.set(section, "%s States" % key, str(value))
# DACs
section = "DACs"
parser.add_section(section)
dac0 = self.readRegister(5000)
dac0 = max(dac0, 0)
dac0 = min(dac0, 5)
parser.set(section, "DAC0", "%0.2f" % dac0)
dac1 = self.readRegister(5002)
dac1 = max(dac1, 0)
dac1 = min(dac1, 5)
parser.set(section, "DAC1", "%0.2f" % dac1)
# Timer Clock Configuration
section = "Timer Clock Speed Configuration"
parser.add_section(section)
timerclockconfig = self.configTimerClock()
for key, value in timerclockconfig.items():
parser.set(section, key, str(value))
# Timers / Counters
section = "Timers And Counters"
parser.add_section(section)
ioconfig = self.configIO()
for key, value in ioconfig.items():
parser.set(section, key, str(value))
for i in range(ioconfig['NumberTimersEnabled']):
mode, value = self.readRegister(7100 + (2 * i), numReg = 2, format = ">HH")
parser.set(section, "Timer%s Mode" % i, str(mode))
parser.set(section, "Timer%s Value" % i, str(value))
return parser
def loadConfig(self, configParserObj):
"""
Name: U6.loadConfig( configParserObj )
Args: configParserObj, A Config Parser object to load in
Desc: Takes a configuration and updates the U6 to match it.
"""
parser = configParserObj
# Set Identifiers:
section = "Identifiers"
if parser.has_section(section):
if parser.has_option(section, "device type"):
if parser.getint(section, "device type") != self.devType:
raise Exception("Not a U6 Config file.")
if parser.has_option(section, "local id"):
self.configU6( LocalID = parser.getint(section, "local id"))
if parser.has_option(section, "name"):
self.setName( parser.get(section, "name") )
# Set FIOs:
section = "FIOs"
if parser.has_section(section):
fiodirs = 0
eiodirs = 0
ciodirs = 0
fiostates = 0
eiostates = 0
ciostates = 0
if parser.has_option(section, "fios directions"):
fiodirs = parser.getint(section, "fios directions")
if parser.has_option(section, "eios directions"):
eiodirs = parser.getint(section, "eios directions")
if parser.has_option(section, "cios directions"):
ciodirs = parser.getint(section, "cios directions")
if parser.has_option(section, "fios states"):
fiostates = parser.getint(section, "fios states")
if parser.has_option(section, "eios states"):
eiostates = parser.getint(section, "eios states")
if parser.has_option(section, "cios states"):
ciostates = parser.getint(section, "cios states")
self.getFeedback( PortStateWrite([fiostates, eiostates, ciostates]), PortDirWrite([fiodirs, eiodirs, ciodirs]) )
# Set DACs:
section = "DACs"
if parser.has_section(section):
if parser.has_option(section, "dac0"):
self.writeRegister(5000, parser.getfloat(section, "dac0"))
if parser.has_option(section, "dac1"):
self.writeRegister(5002, parser.getfloat(section, "dac1"))
# Set Timer Clock Configuration
section = "Timer Clock Speed Configuration"
if parser.has_section(section):
if parser.has_option(section, "timerclockbase") and parser.has_option(section, "timerclockdivisor"):
self.configTimerClock(TimerClockBase = parser.getint(section, "timerclockbase"), TimerClockDivisor = parser.getint(section, "timerclockdivisor"))
# Set Timers / Counters
section = "Timers And Counters"
if parser.has_section(section):
nte = None
c0e = None
c1e = None
cpo = None
if parser.has_option(section, "NumberTimersEnabled"):
nte = parser.getint(section, "NumberTimersEnabled")
if parser.has_option(section, "TimerCounterPinOffset"):
cpo = parser.getint(section, "TimerCounterPinOffset")
if parser.has_option(section, "Counter0Enabled"):
c0e = parser.getboolean(section, "Counter0Enabled")
if parser.has_option(section, "Counter1Enabled"):
c1e = parser.getboolean(section, "Counter1Enabled")
self.configIO(NumberTimersEnabled = nte, EnableCounter1 = c1e, EnableCounter0 = c0e, TimerCounterPinOffset = cpo)
mode = None
value = None
for i in range(4):
if parser.has_option(section, "timer%i mode" % i):
mode = parser.getint(section, "timer%i mode" % i)
if parser.has_option(section, "timer%i value" % i):
value = parser.getint(section, "timer%i value" % i)
self.getFeedback( TimerConfig(i, mode, value) )
class FeedbackCommand(object):
'''
The base FeedbackCommand class
Used to make Feedback easy. Make a list of these
and call getFeedback.
'''
readLen = 0
def handle(self, input):
return None
validChannels = range(144)
class AIN(FeedbackCommand):
'''
Analog Input Feedback command
AIN(PositiveChannel)
PositiveChannel : the positive channel to use
NOTE: This function kept for compatibility. Please use
the new AIN24 and AIN24AR.
returns 16-bit unsigned int sample
>>> d.getFeedback( u6.AIN( PositiveChannel ) )
[ 19238 ]
'''
def __init__(self, PositiveChannel):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.cmdBytes = [ 0x01, PositiveChannel, 0 ]
readLen = 2
def __repr__(self):
return "<u6.AIN( PositiveChannel = %s )>" % self.positiveChannel
def handle(self, input):
result = (input[1] << 8) + input[0]
return result
class AIN24(FeedbackCommand):
'''
Analog Input 24-bit Feedback command
ainCommand = AIN24(PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False)
See section 5.2.5.2 of the user's guide.
NOTE: If you use a gain index of 15 (autorange), you should be using
the AIN24AR command instead.
positiveChannel : The positive channel to use
resolutionIndex : 0=default, 1-8 for high-speed ADC,
9-12 for high-res ADC on U6-Pro.
gainIndex : 0=x1, 1=x10, 2=x100, 3=x1000, 15=autorange
settlingFactor : 0=5us, 1=10us, 2=100us, 3=1ms, 4=10ms
differential : If this bit is set, a differential reading is done where
the negative channel is positiveChannel+1
returns 24-bit unsigned int sample
>>> d.getFeedback( u6.AIN24(PositiveChannel, ResolutionIndex = 0,
GainIndex = 0, SettlingFactor = 0,
Differential = False ) )
[ 193847 ]
'''
def __init__(self, PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.resolutionIndex = ResolutionIndex
self.gainIndex = GainIndex
self.settlingFactor = SettlingFactor
self.differential = Differential
byte2 = ( ResolutionIndex & 0xf )
byte2 = ( ( GainIndex & 0xf ) << 4 ) + byte2
byte3 = (int(Differential) << 7) + SettlingFactor
self.cmdBytes = [ 0x02, PositiveChannel, byte2, byte3 ]
def __repr__(self):
return "<u6.AIN24( PositiveChannel = %s, ResolutionIndex = %s, GainIndex = %s, SettlingFactor = %s, Differential = %s )>" % (self.positiveChannel, self.resolutionIndex, self.gainIndex, self.settlingFactor, self.differential)
readLen = 3
def handle(self, input):
#Put it all into an integer.
result = (input[2] << 16 ) + (input[1] << 8 ) + input[0]
return result
class AIN24AR(FeedbackCommand):
'''
Autorange Analog Input 24-bit Feedback command
ainARCommand = AIN24AR(0, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False)
See section 5.2.5.3 of the user's guide
PositiveChannel : The positive channel to use
ResolutionIndex : 0=default, 1-8 for high-speed ADC,
9-12 for high-res ADC on U6-Pro.
GainIndex : 0=x1, 1=x10, 2=x100, 3=x1000, 15=autorange
SettlingFactor : 0=5us, 1=10us, 2=100us, 3=1ms, 4=10ms
Differential : If this bit is set, a differential reading is done where
the negative channel is positiveChannel+1
returns a dictionary:
{
'AIN' : < 24-bit binary reading >,
'ResolutionIndex' : < actual resolution setting used for the reading >,
'GainIndex' : < actual gain used for the reading >,
'Status' : < reserved for future use >
}
>>> d.getFeedback( u6.AIN24AR( PositiveChannel, ResolutionIndex = 0,
GainIndex = 0, SettlingFactor = 0,
Differential = False ) )
{ 'AIN' : 193847, 'ResolutionIndex' : 0, 'GainIndex' : 0, 'Status' : 0 }
'''
def __init__(self, PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.resolutionIndex = ResolutionIndex
self.gainIndex = GainIndex
self.settlingFactor = SettlingFactor
self.differential = Differential
byte2 = ( ResolutionIndex & 0xf )
byte2 = ( ( GainIndex & 0xf ) << 4 ) + byte2
byte3 = (int(Differential) << 7) + SettlingFactor
self.cmdBytes = [ 0x03, PositiveChannel, byte2, byte3 ]
def __repr__(self):
return "<u6.AIN24AR( PositiveChannel = %s, ResolutionIndex = %s, GainIndex = %s, SettlingFactor = %s, Differential = %s )>" % (self.positiveChannel, self.resolutionIndex, self.gainIndex, self.settlingFactor, self.differential)
readLen = 5
def handle(self, input):
#Put it all into an integer.
result = (input[2] << 16 ) + (input[1] << 8 ) + input[0]
resolutionIndex = input[3] & 0xf
gainIndex = ( input[3] >> 4 ) & 0xf
status = input[4]
return { 'AIN' : result, 'ResolutionIndex' : resolutionIndex, 'GainIndex' : gainIndex, 'Status' : status }
class WaitShort(FeedbackCommand):
'''
WaitShort Feedback command
specify the number of 128us time increments to wait
>>> d.getFeedback( u6.WaitShort( Time ) )
[ None ]
'''
def __init__(self, Time):
self.time = Time % 256
self.cmdBytes = [ 5, Time % 256 ]
def __repr__(self):
return "<u6.WaitShort( Time = %s )>" % self.time
class WaitLong(FeedbackCommand):
'''
WaitLong Feedback command
specify the number of 32ms time increments to wait
>>> d.getFeedback( u6.WaitLong( Time ) )
[ None ]
'''
def __init__(self, Time):
self.time = Time
self.cmdBytes = [ 6, Time % 256 ]
def __repr__(self):
return "<u6.WaitLog( Time = %s )>" % self.time
class LED(FeedbackCommand):
'''
LED Toggle
specify whether the LED should be on or off by truth value
1 or True = On, 0 or False = Off
>>> d.getFeedback( u6.LED( State ) )
[ None ]
'''
def __init__(self, State):
self.state = State
self.cmdBytes = [ 9, int(bool(State)) ]
def __repr__(self):
return "<u6.LED( State = %s )>" % self.state
class BitStateRead(FeedbackCommand):
'''
BitStateRead Feedback command
read the state of a single bit of digital I/O. Only digital
lines return valid readings.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
return 0 or 1
>>> d.getFeedback( u6.BitStateRead( IONumber ) )
[ 1 ]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 10, IONumber % 20 ]
def __repr__(self):
return "<u6.BitStateRead( IONumber = %s )>" % self.ioNumber
readLen = 1
def handle(self, input):
return int(bool(input[0]))
class BitStateWrite(FeedbackCommand):
'''
BitStateWrite Feedback command
write a single bit of digital I/O. The direction of the
specified line is forced to output.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
State: 0 or 1
>>> d.getFeedback( u6.BitStateWrite( IONumber, State ) )
[ None ]
'''
def __init__(self, IONumber, State):
self.ioNumber = IONumber
self.state = State
self.cmdBytes = [ 11, (IONumber % 20) + (int(bool(State)) << 7) ]
def __repr__(self):
return "<u6.BitStateWrite( IONumber = %s, State = %s )>" % self.ioNumber
class BitDirRead(FeedbackCommand):
'''
Read the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
returns 1 = Output, 0 = Input
>>> d.getFeedback( u6.BitDirRead( IONumber ) )
[ 1 ]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 12, IONumber % 20 ]
def __repr__(self):
return "<u6.BitDirRead( IONumber = %s )>" % self.ioNumber
readLen = 1
def handle(self, input):
return int(bool(input[0]))
class BitDirWrite(FeedbackCommand):
'''
BitDirWrite Feedback command
Set the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
Direction: 1 = Output, 0 = Input
>>> d.getFeedback( u6.BitDirWrite( IONumber, Direction ) )
[ None ]
'''
def __init__(self, IONumber, Direction):
self.ioNumber = IONumber
self.direction = Direction
self.cmdBytes = [ 13, (IONumber % 20) + (int(bool(Direction)) << 7) ]
def __repr__(self):
return "<u6.BitDirWrite( IONumber = %s, Direction = %s )>" % (self.ioNumber, self.direction)
class PortStateRead(FeedbackCommand):
"""
PortStateRead Feedback command
Reads the state of all digital I/O.
>>> d.getFeedback( u6.PortStateRead() )
[ { 'FIO' : 10, 'EIO' : 0, 'CIO' : 0 } ]
"""
def __init__(self):
self.cmdBytes = [ 26 ]
def __repr__(self):
return "<u6.PortStateRead()>"
readLen = 3
def handle(self, input):
return {'FIO' : input[0], 'EIO' : input[1], 'CIO' : input[2] }
class PortStateWrite(FeedbackCommand):
"""
PortStateWrite Feedback command
State: A list of 3 bytes representing FIO, EIO, CIO
WriteMask: A list of 3 bytes, representing which to update.
The Default is all ones.
>>> d.getFeedback( u6.PortStateWrite( State,
WriteMask = [ 0xff, 0xff, 0xff] ) )
[ None ]
"""
def __init__(self, State, WriteMask = [ 0xff, 0xff, 0xff]):
self.state = State
self.writeMask = WriteMask
self.cmdBytes = [ 27 ] + WriteMask + State
def __repr__(self):
return "<u6.PortStateWrite( State = %s, WriteMask = %s )>" % (self.state, self.writeMask)
class PortDirRead(FeedbackCommand):
"""
PortDirRead Feedback command
Reads the direction of all digital I/O.
>>> d.getFeedback( u6.PortDirRead() )
[ { 'FIO' : 10, 'EIO' : 0, 'CIO' : 0 } ]
"""
def __init__(self):
self.cmdBytes = [ 28 ]
def __repr__(self):
return "<u6.PortDirRead()>"
readLen = 3
def handle(self, input):
return {'FIO' : input[0], 'EIO' : input[1], 'CIO' : input[2] }
class PortDirWrite(FeedbackCommand):
"""
PortDirWrite Feedback command
Direction: A list of 3 bytes representing FIO, EIO, CIO
WriteMask: A list of 3 bytes, representing which to update. Default is all ones.
>>> d.getFeedback( u6.PortDirWrite( Direction,
WriteMask = [ 0xff, 0xff, 0xff] ) )
[ None ]
"""
def __init__(self, Direction, WriteMask = [ 0xff, 0xff, 0xff]):
self.direction = Direction
self.writeMask = WriteMask
self.cmdBytes = [ 29 ] + WriteMask + Direction
def __repr__(self):
return "<u6.PortDirWrite( Direction = %s, WriteMask = %s )>" % (self.direction, self.writeMask)
class DAC8(FeedbackCommand):
'''
8-bit DAC Feedback command
Controls a single analog output
Dac: 0 or 1
Value: 0-255
>>> d.getFeedback( u6.DAC8( Dac, Value ) )
[ None ]
'''
def __init__(self, Dac, Value):
self.dac = Dac
self.value = Value % 256
self.cmdBytes = [ 34 + (Dac % 2), Value % 256 ]
def __repr__(self):
return "<u6.DAC8( Dac = %s, Value = %s )>" % (self.dac, self.value)
class DAC0_8(DAC8):
"""
8-bit DAC Feedback command for DAC0
Controls DAC0 in 8-bit mode.
Value: 0-255
>>> d.getFeedback( u6.DAC0_8( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC8.__init__(self, 0, Value)
def __repr__(self):
return "<u6.DAC0_8( Value = %s )>" % self.value
class DAC1_8(DAC8):
"""
8-bit DAC Feedback command for DAC1
Controls DAC1 in 8-bit mode.
Value: 0-255
>>> d.getFeedback( u6.DAC1_8( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC8.__init__(self, 1, Value)
def __repr__(self):
return "<u6.DAC1_8( Value = %s )>" % self.value
class DAC16(FeedbackCommand):
'''
16-bit DAC Feedback command
Controls a single analog output
Dac: 0 or 1
Value: 0-65535
>>> d.getFeedback( u6.DAC16( Dac, Value ) )
[ None ]
'''
def __init__(self, Dac, Value):
self.dac = Dac
self.value = Value
self.cmdBytes = [ 38 + (Dac % 2), Value % 256, Value >> 8 ]
def __repr__(self):
return "<u6.DAC8( Dac = %s, Value = %s )>" % (self.dac, self.value)
class DAC0_16(DAC16):
"""
16-bit DAC Feedback command for DAC0
Controls DAC0 in 16-bit mode.
Value: 0-65535
>>> d.getFeedback( u6.DAC0_16( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC16.__init__(self, 0, Value)
def __repr__(self):
return "<u6.DAC0_16( Value = %s )>" % self.value
class DAC1_16(DAC16):
"""
16-bit DAC Feedback command for DAC1
Controls DAC1 in 16-bit mode.
Value: 0-65535
>>> d.getFeedback( u6.DAC1_16( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC16.__init__(self, 1, Value)
def __repr__(self):
return "<u6.DAC1_16( Value = %s )>" % self.value
class Timer(FeedbackCommand):
"""
For reading the value of the Timer. It provides the ability to update/reset
a given timer, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
timer: Either 0 or 1, for Timer0 or Timer1
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
Returns an unsigned integer of the timer value, unless Mode has been
specified and there are special return values. See Section 2.9.1 for
expected return values.
>>> d.getFeedback( u6.Timer( timer, UpdateReset = False, Value = 0 \
... , Mode = None ) )
[ 12314 ]
"""
def __init__(self, timer, UpdateReset = False, Value=0, Mode = None):
if timer != 0 and timer != 1:
raise LabJackException("Timer should be either 0 or 1.")
if UpdateReset and Value is None:
raise LabJackException("UpdateReset set but no value.")
self.timer = timer
self.updateReset = UpdateReset
self.value = Value
self.mode = Mode
self.cmdBytes = [ (42 + (2*timer)), UpdateReset, Value % 256, Value >> 8 ]
readLen = 4
def __repr__(self):
return "<u6.Timer( timer = %s, UpdateReset = %s, Value = %s, Mode = %s )>" % (self.timer, self.updateReset, self.value, self.mode)
def handle(self, input):
inStr = struct.pack('B' * len(input), *input)
if self.mode == 8:
return struct.unpack('<i', inStr )[0]
elif self.mode == 9:
maxCount, current = struct.unpack('<HH', inStr )
return current, maxCount
else:
return struct.unpack('<I', inStr )[0]
class Timer0(Timer):
"""
For reading the value of the Timer0. It provides the ability to
update/reset Timer0, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
>>> d.getFeedback( u6.Timer0( UpdateReset = False, Value = 0, \
... Mode = None ) )
[ 12314 ]
"""
def __init__(self, UpdateReset = False, Value = 0, Mode = None):
Timer.__init__(self, 0, UpdateReset, Value, Mode)
def __repr__(self):
return "<u6.Timer0( UpdateReset = %s, Value = %s, Mode = %s )>" % (self.updateReset, self.value, self.mode)
class Timer1(Timer):
"""
For reading the value of the Timer1. It provides the ability to
update/reset Timer1, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
>>> d.getFeedback( u6.Timer1( UpdateReset = False, Value = 0, \
... Mode = None ) )
[ 12314 ]
"""
def __init__(self, UpdateReset = False, Value = 0, Mode = None):
Timer.__init__(self, 1, UpdateReset, Value, Mode)
def __repr__(self):
return "<u6.Timer1( UpdateReset = %s, Value = %s, Mode = %s )>" % (self.updateReset, self.value, self.mode)
class QuadratureInputTimer(Timer):
"""
For reading Quadrature input timers. They are special because their values
are signed.
( Section 2.9.1.8 of the User's Guide)
Args:
UpdateReset: Set True if you want to reset the counter.
Value: Set to 0, and UpdateReset to True to reset the counter.
Returns a signed integer.
>>> # Setup the two timers to be quadrature
>>> d.getFeedback( u6.Timer0Config( 8 ), u6.Timer1Config( 8 ) )
[None, None]
>>> # Read the value
>>> d.getFeedback( u6.QuadratureInputTimer() )
[-21]
"""
def __init__(self, UpdateReset = False, Value = 0):
Timer.__init__(self, 0, UpdateReset, Value, Mode = 8)
def __repr__(self):
return "<u6.QuadratureInputTimer( UpdateReset = %s, Value = %s )>" % (self.updateReset, self.value)
class TimerStopInput1(Timer1):
"""
For reading a stop input timer. They are special because the value returns
the current edge count and the stop value.
( Section 2.9.1.9 of the User's Guide)
Args:
UpdateReset: Set True if you want to update the value.
Value: The stop value. Only updated if the UpdateReset bit is 1.
Returns a tuple where the first value is current edge count, and the second
value is the stop value.
>>> # Setup the timer to be Stop Input
>>> d.getFeedback( u6.Timer0Config( 9, Value = 30 ) )
[None]
>>> # Read the timer
>>> d.getFeedback( u6.TimerStopInput1() )
[(0, 30)]
"""
def __init__(self, UpdateReset = False, Value = 0):
Timer.__init__(self, 1, UpdateReset, Value, Mode = 9)
def __repr__(self):
return "<u6.TimerStopInput1( UpdateReset = %s, Value = %s )>" % (self.updateReset, self.value)
class TimerConfig(FeedbackCommand):
"""
This IOType configures a particular timer.
timer = # of the timer to configure
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.TimerConfig( timer, TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, timer, TimerMode, Value=0):
'''Creates command bytes for configuring a Timer'''
#Conditions come from pages 33-34 of user's guide
if timer not in range(4):
raise LabJackException("Timer should be either 0-3.")
if TimerMode > 13 or TimerMode < 0:
raise LabJackException("Invalid Timer Mode.")
self.timer = timer
self.timerMode = TimerMode
self.value = Value
self.cmdBytes = [43 + (timer * 2), TimerMode, Value % 256, Value >> 8]
def __repr__(self):
return "<u6.TimerConfig( timer = %s, TimerMode = %s, Value = %s )>" % (self.timer, self.timerMode, self.value)
class Timer0Config(TimerConfig):
"""
This IOType configures Timer0.
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.Timer0Config( TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, TimerMode, Value = 0):
TimerConfig.__init__(self, 0, TimerMode, Value)
def __repr__(self):
return "<u6.Timer0Config( TimerMode = %s, Value = %s )>" % (self.timerMode, self.value)
class Timer1Config(TimerConfig):
"""
This IOType configures Timer1.
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.Timer1Config( TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, TimerMode, Value = 0):
TimerConfig.__init__(self, 1, TimerMode, Value)
def __repr__(self):
return "<u6.Timer1Config( TimerMode = %s, Value = %s )>" % (self.timerMode, self.value)
class Counter(FeedbackCommand):
'''
Counter Feedback command
Reads a hardware counter, optionally resetting it
counter: 0 or 1
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter( counter, Reset = False ) )
[ 2183 ]
'''
def __init__(self, counter, Reset):
self.counter = counter
self.reset = Reset
self.cmdBytes = [ 54 + (counter % 2), int(bool(Reset))]
def __repr__(self):
return "<u6.Counter( counter = %s, Reset = %s )>" % (self.counter, self.reset)
readLen = 4
def handle(self, input):
inStr = ''.join([chr(x) for x in input])
return struct.unpack('<I', inStr )[0]
class Counter0(Counter):
'''
Counter0 Feedback command
Reads hardware counter0, optionally resetting it
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter0( Reset = False ) )
[ 2183 ]
'''
def __init__(self, Reset = False):
Counter.__init__(self, 0, Reset)
def __repr__(self):
return "<u6.Counter0( Reset = %s )>" % self.reset
class Counter1(Counter):
'''
Counter1 Feedback command
Reads hardware counter1, optionally resetting it
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter1( Reset = False ) )
[ 2183 ]
'''
def __init__(self, Reset = False):
Counter.__init__(self, 1, Reset)
def __repr__(self):
return "<u6.Counter1( Reset = %s )>" % self.reset
class DSP(FeedbackCommand):
'''
DSP Feedback command
Acquires 1000 samples from the specified AIN at 50us intervals and performs
the specified analysis on the acquired data.
AcquireNewData: True, acquire new data; False, operate on existing data
DSPAnalysis: 1, True RMS; 2, DC Offset; 3, Peak To Peak; 4, Period (ms)
PLine: Positive Channel
Gain: The gain you would like to use
Resolution: The resolution index to use
SettlingFactor: The SettlingFactor to use
Differential: True, do differential readings; False, single-ended readings
See section 5.2.5.20 of the U6 User's Guide
(http://labjack.com/support/u6/users-guide/5.2.5.20)
>>> d.getFeedback( u6.DSP( PLine, Resolution = 0, Gain = 0,
SettlingFactor = 0, Differential = False,
DSPAnalysis = 1, AcquireNewData = True) )
[ 2183 ]
'''
def __init__(self, PLine, Resolution = 0, Gain = 0, SettlingFactor = 0, Differential = False, DSPAnalysis = 1, AcquireNewData = True):
self.pline = PLine
self.resolution = Resolution
self.gain = Gain
self.settlingFactor = SettlingFactor
self.differential = Differential
self.dspAnalysis = DSPAnalysis
self.acquireNewData = AcquireNewData
byte1 = DSPAnalysis + ( int(AcquireNewData) << 7 )
byte4 = ( Gain << 4 ) + Resolution
byte5 = ( int(Differential) << 7 ) + SettlingFactor
self.cmdBytes = [ 62, byte1, PLine, 0, byte4, byte5, 0, 0 ]
def __repr__(self):
return "<u6.DSP( PLine = %s, Resolution = %s, Gain = %s, SettlingFactor = %s, Differential = %s, DSPAnalysis = %s, AcquireNewData = %s )>" % (self.pline, self.resolution, self.gain, self.settlingFactor, self.differential, self.dspAnalysis, self.acquireNewData)
readLen = 4
def handle(self, input):
inStr = ''.join([chr(x) for x in input])
return struct.unpack('<I', inStr )[0]
| bsd-2-clause |
vivekpabani/projecteuler | python/058/problem_058.py | 1 | 1812 | #!/usr/bin/env python
# coding=utf-8
"""
Problem Definition :
Starting with 1 and spiralling anticlockwise in the following way, a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right diagonal, but what is more interesting is that 8 out of the 13 numbers lying along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above, a square spiral with side length 9 will be formed. If this process is continued, what is the side length of the square spiral for which the ratio of primes along both diagonals first falls below 10%?
"""
__author__ = 'vivek'
import time
import math
def is_prime(number):
if number < 0:
return 0
elif number == 2 or number == 3:
return 1
elif number % 2 == 0 or number % 3 == 0 or number == 1:
return 0
else:
start = 5
while start <= int(math.sqrt(number)):
if number % start == 0:
return 0
if number % (start + 2) == 0:
return 0
start += 6
return 1
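# Illustrative sketch, not part of the original solution: the four corner
# values of the layer with odd side length s can be checked directly, since
# consecutive corners differ by s - 1:
#
# def corners(s):
#     return [s * s - i * (s - 1) for i in range(4)]
#
# corners(7)  # -> [49, 43, 37, 31]; 43, 37 and 31 are prime, 49 = 7*7 is not.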
def main():
start_time = time.clock()
num = 1
step = 2
dia = 1
prime_count = 0
while True:
for i in xrange(4):
num += step
if is_prime(num):
prime_count += 1
dia += 4
if prime_count*1.0/dia < 0.1:
break
step += 2
print(num, dia, prime_count)
print "Run time...{} secs \n".format(round(time.clock() - start_time, 4))
if __name__ == "__main__":
main()
| apache-2.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/action/ce_config.py | 89 | 4192 | #
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ce import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
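# Illustrative sketch, not part of the plugin: _write_backup() names files
# '<backup_path>/<host>_config.<timestamp>'. For a hypothetical host this
# would produce something like:
#
# backup/switch01_config.2017-01-01@12:00:00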
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, '_block'):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| bsd-3-clause |
firstjob/python-social-auth | social/tests/test_utils.py | 73 | 5018 | import sys
import unittest2 as unittest
from mock import Mock
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, slugify, build_absolute_uri, \
partial_pipeline_data
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
def test_none_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', None), None)
def test_empty_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', ''), None)
def test_dict_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', {}), None)
def test_invalid_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', {'foo': 'bar'}), None)
def test_wrong_path_redirect(self):
self.assertEqual(
sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'),
None
)
def test_valid_absolute_redirect(self):
self.assertEqual(
sanitize_redirect('myapp.com', 'http://myapp.com/path/'),
'http://myapp.com/path/'
)
def test_valid_relative_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/')
class UserIsAuthenticatedTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_authenticated(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_authenticated(object()), True)
def test_user_has_is_authenticated(self):
class User(object):
is_authenticated = True
self.assertEqual(user_is_authenticated(User()), True)
def test_user_has_is_authenticated_callable(self):
class User(object):
def is_authenticated(self):
return True
self.assertEqual(user_is_authenticated(User()), True)
class UserIsActiveTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_active(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_active(object()), True)
def test_user_has_is_active(self):
class User(object):
is_active = True
self.assertEqual(user_is_active(User()), True)
def test_user_has_is_active_callable(self):
class User(object):
def is_active(self):
return True
self.assertEqual(user_is_active(User()), True)
class SlugifyTest(unittest.TestCase):
def test_slugify_formats(self):
if PY3:
self.assertEqual(slugify('FooBar'), 'foobar')
self.assertEqual(slugify('Foo Bar'), 'foo-bar')
self.assertEqual(slugify('Foo (Bar)'), 'foo-bar')
else:
self.assertEqual(slugify('FooBar'.decode('utf-8')), 'foobar')
self.assertEqual(slugify('Foo Bar'.decode('utf-8')), 'foo-bar')
self.assertEqual(slugify('Foo (Bar)'.decode('utf-8')), 'foo-bar')
class BuildAbsoluteURITest(unittest.TestCase):
def setUp(self):
self.host = 'http://foobar.com'
def tearDown(self):
self.host = None
def test_path_none(self):
self.assertEqual(build_absolute_uri(self.host), self.host)
def test_path_empty(self):
self.assertEqual(build_absolute_uri(self.host, ''), self.host)
def test_path_http(self):
self.assertEqual(build_absolute_uri(self.host, 'http://barfoo.com'),
'http://barfoo.com')
def test_path_https(self):
self.assertEqual(build_absolute_uri(self.host, 'https://barfoo.com'),
'https://barfoo.com')
def test_host_ends_with_slash_and_path_starts_with_slash(self):
self.assertEqual(build_absolute_uri(self.host + '/', '/foo/bar'),
'http://foobar.com/foo/bar')
def test_absolute_uri(self):
self.assertEqual(build_absolute_uri(self.host, '/foo/bar'),
'http://foobar.com/foo/bar')
class PartialPipelineData(unittest.TestCase):
def test_kwargs_included_in_result(self):
backend = self._backend()
key, val = ('foo', 'bar')
_, xkwargs = partial_pipeline_data(backend, None,
*(), **dict([(key, val)]))
self.assertTrue(key in xkwargs)
self.assertEqual(xkwargs[key], val)
def test_update_user(self):
user = object()
backend = self._backend(session_kwargs={'user': None})
_, xkwargs = partial_pipeline_data(backend, user)
self.assertTrue('user' in xkwargs)
self.assertEqual(xkwargs['user'], user)
def _backend(self, session_kwargs=None):
strategy = Mock()
strategy.request = None
strategy.session_get.return_value = object()
strategy.partial_from_session.return_value = \
(0, 'mock-backend', [], session_kwargs or {})
backend = Mock()
backend.name = 'mock-backend'
backend.strategy = strategy
return backend
| bsd-3-clause |
ptmr3/GalaxyNote3-Kernel-kk- | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
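# Illustrative sketch, not part of the original script: the inverse
# operation, re-packing values into the packed le32 format this script
# reads. A hypothetical helper, assuming the values arrive in index order:
#
# def pack_values(values):
#     return b"".join(struct.pack("<I", v) for v in values)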
| gpl-2.0 |
aospan/linux-next-bcm4708-edgecore-ecw7220-l | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
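# Illustrative sketch, not part of the original script: a typical raw trace
# line and what parseLine() extracts. The line content is made up, but it
# follows the function tracer's "callee <-caller" format:
#
# line = "  bash-1234  [000]  6785.187244: sys_read <-syscall_call"
# parseLine(line)  # -> ('6785.187244', 'sys_read', 'syscall_call')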
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
ds-hwang/chromium-crosswalk | tools/perf/benchmarks/gpu_times.py | 8 | 2505 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from telemetry import benchmark
from telemetry.timeline import tracing_category_filter
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf import timeline_based_measurement
import page_sets
TOPLEVEL_CATEGORIES = ['disabled-by-default-gpu.device',
'disabled-by-default-gpu.service']
class _GPUTimes(perf_benchmark.PerfBenchmark):
def CreateTimelineBasedMeasurementOptions(self):
cat_string = ','.join(TOPLEVEL_CATEGORIES)
cat_filter = tracing_category_filter.TracingCategoryFilter(cat_string)
options = timeline_based_measurement.Options(overhead_level=cat_filter)
options.SetLegacyTimelineBasedMetrics([gpu_timeline.GPUTimelineMetric()])
return options
@benchmark.Disabled('all') # http://crbug.com/453131, http://crbug.com/527543
class GPUTimesKeyMobileSites(_GPUTimes):
"""Measures GPU timeline metric on key mobile sites."""
page_set = page_sets.KeyMobileSitesSmoothPageSet
@classmethod
def Name(cls):
return 'gpu_times.key_mobile_sites_smooth'
@benchmark.Disabled('all') # http://crbug.com/453131, http://crbug.com/527543
class GPUTimesGpuRasterizationKeyMobileSites(_GPUTimes):
"""Measures GPU timeline metric on key mobile sites with GPU rasterization.
"""
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'gpu_times.gpu_rasterization.key_mobile_sites_smooth'
@benchmark.Disabled('all') # http://crbug.com/453131, http://crbug.com/517476
class GPUTimesTop25Sites(_GPUTimes):
"""Measures GPU timeline metric for the top 25 sites."""
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'gpu_times.top_25_smooth'
@benchmark.Disabled('all') # http://crbug.com/453131, http://crbug.com/517476
class GPUTimesGpuRasterizationTop25Sites(_GPUTimes):
"""Measures GPU timeline metric for the top 25 sites with GPU rasterization.
"""
page_set = page_sets.Top25SmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'gpu_times.gpu_rasterization.top_25_smooth'
| bsd-3-clause |
bqbn/addons-server | src/olympia/files/utils.py | 1 | 52387 | import collections
import contextlib
import errno
import hashlib
import json
import os
import io
import re
import shutil
import signal
import stat
import struct
import tarfile
import tempfile
import zipfile
import fcntl
from datetime import datetime
from django import forms
from django.conf import settings
from django.core.files.storage import (
File as DjangoFile, default_storage as storage)
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import force_text
from django.utils.jslex import JsLexer
from django.utils.translation import ugettext
import rdflib
from xml.parsers.expat import ExpatError
from defusedxml import minidom
from defusedxml.common import DefusedXmlException
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.addons.utils import verify_mozilla_trademark
from olympia.amo.utils import decode_json, find_language, rm_local_tmp_dir
from olympia.applications.models import AppVersion
from olympia.lib.crypto.signing import get_signer_organizational_unit_name
from olympia.lib import unicodehelper
from olympia.users.utils import (
mozilla_signed_extension_submission_allowed,
system_addon_submission_allowed)
from olympia.versions.compare import version_int as vint
log = olympia.core.logger.getLogger('z.files.utils')
class ParseError(forms.ValidationError):
pass
VERSION_RE = re.compile(r'^[-+*.\w]{,32}$')
SIGNED_RE = re.compile(r'^META\-INF/(\w+)\.(rsa|sf)$')
# This is essentially what Firefox matches
# (see toolkit/components/extensions/ExtensionUtils.jsm)
MSG_RE = re.compile(r'__MSG_(?P<msgid>[a-zA-Z0-9@_]+?)__')
# The default update URL.
default = (
'https://versioncheck.addons.mozilla.org/update/VersionCheck.php?'
'reqVersion=%REQ_VERSION%&id=%ITEM_ID%&version=%ITEM_VERSION%&'
'maxAppVersion=%ITEM_MAXAPPVERSION%&status=%ITEM_STATUS%&appID=%APP_ID%&'
'appVersion=%APP_VERSION%&appOS=%APP_OS%&appABI=%APP_ABI%&'
'locale=%APP_LOCALE%¤tAppVersion=%CURRENT_APP_VERSION%&'
'updateType=%UPDATE_TYPE%'
)
def get_filepath(fileorpath):
"""Resolve the actual file path of `fileorpath`.
This supports various input formats, a path, a django `File` object,
`olympia.files.File`, a `FileUpload` or just a regular file-like object.
"""
if isinstance(fileorpath, str):
return fileorpath
elif isinstance(fileorpath, DjangoFile):
return fileorpath
elif hasattr(fileorpath, 'file_path'): # File
return fileorpath.file_path
elif hasattr(fileorpath, 'path'): # FileUpload
return fileorpath.path
elif hasattr(fileorpath, 'name'): # file-like object
return fileorpath.name
return fileorpath
def id_to_path(pk):
"""
Generate a path from an id, to distribute folders in the file system.
1 => 1/1/1
12 => 2/12/12
123456 => 6/56/123456
"""
pk = str(pk)
path = [pk[-1]]
if len(pk) >= 2:
path.append(pk[-2:])
else:
path.append(pk)
path.append(pk)
return os.path.join(*path)
def get_file(fileorpath):
"""Get a file-like object, whether given a FileUpload object or a path."""
if hasattr(fileorpath, 'path'): # FileUpload
return storage.open(fileorpath.path, 'rb')
if hasattr(fileorpath, 'name'):
return fileorpath
return storage.open(fileorpath, 'rb')
def make_xpi(files):
file_obj = io.BytesIO()
zip_file = zipfile.ZipFile(file_obj, 'w')
for path, data in files.items():
zip_file.writestr(path, data)
zip_file.close()
file_obj.seek(0)
return file_obj
class UnsupportedFileType(forms.ValidationError):
pass
class NoManifestFound(forms.ValidationError):
pass
class InvalidManifest(forms.ValidationError):
pass
class Extractor(object):
"""Extract add-on info from a manifest file."""
App = collections.namedtuple('App', 'appdata id min max')
@classmethod
def parse(cls, xpi_fobj, minimal=False):
zip_file = SafeZip(xpi_fobj)
certificate = os.path.join('META-INF', 'mozilla.rsa')
certificate_info = None
if zip_file.exists(certificate):
certificate_info = SigningCertificateInformation(
zip_file.read(certificate))
if zip_file.exists('manifest.json'):
data = ManifestJSONExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
elif zip_file.exists('install.rdf'):
# Note that RDFExtractor is a misnomer, it receives the zip_file
# object because it might need to read other files than just
# the rdf to deal with dictionaries, complete themes etc.
data = RDFExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
else:
raise NoManifestFound(
'No install.rdf or manifest.json found')
return data
def get_appversions(app, min_version, max_version):
"""Return the `AppVersion`s that correspond to the given versions."""
qs = AppVersion.objects.filter(application=app.id)
min_appver = qs.get(version=min_version)
max_appver = qs.get(version=max_version)
return min_appver, max_appver
def get_simple_version(version_string):
"""Extract the version number without the ><= requirements.
This simply extracts the version number without the ><= requirement so
it will not be accurate for version requirements that are not >=, <= or
= to a version.
>>> get_simple_version('>=33.0a1')
'33.0a1'
"""
if not version_string:
return ''
return re.sub('[<=>]', '', version_string)
class RDFExtractor(object):
"""Extract add-on info from an install.rdf."""
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
TYPES = {
'2': amo.ADDON_EXTENSION,
'4': amo.ADDON_EXTENSION, # Really a XUL theme but now unsupported.
'8': amo.ADDON_LPAPP,
'64': amo.ADDON_DICT,
'128': amo.ADDON_EXTENSION, # Telemetry Experiment
'256': amo.ADDON_EXTENSION, # WebExtension Experiment
}
# Langpacks and dictionaries, if the type is properly set, are always
# considered restartless.
ALWAYS_RESTARTLESS_TYPES = ('8', '64', '128', '256')
# Telemetry and Web Extension Experiments types.
# See: bug 1220097 and https://github.com/mozilla/addons-server/issues/3315
EXPERIMENT_TYPES = ('128', '256')
manifest = u'urn:mozilla:install-manifest'
is_experiment = False # Experiment extensions: bug 1220097.
def __init__(self, zip_file, certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
self.rdf = rdflib.Graph().parse(
data=force_text(zip_file.read('install.rdf')))
self.package_type = None
self.find_root() # Will set self.package_type
def parse(self, minimal=False):
data = {
'guid': self.find('id'),
'type': self.find_type(),
'version': self.find('version'),
'is_webextension': False,
'name': self.find('name'),
'summary': self.find('description'),
}
# Populate certificate information (e.g signed by mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if not minimal:
data.update({
'homepage': self.find('homepageURL'),
'is_restart_required': (
self.find('bootstrap') != 'true' and
self.find('type') not in self.ALWAYS_RESTARTLESS_TYPES),
'apps': self.apps(),
})
# We used to simply use the value of 'strictCompatibility' in the
# rdf to set strict_compatibility, but now we enable it or not for
# all legacy add-ons depending on their type. This will prevent
# them from being marked as compatible with Firefox 57.
# This is not true for legacy add-ons already signed by Mozilla.
# For these add-ons we just re-use whatever
# `strictCompatibility` is set to.
if data['type'] not in amo.NO_COMPAT:
if self.certinfo and self.certinfo.is_mozilla_signed_ou:
data['strict_compatibility'] = (
self.find('strictCompatibility') == 'true')
else:
data['strict_compatibility'] = True
else:
data['strict_compatibility'] = False
# `experiment` is detected in `find_type`.
data['is_experiment'] = self.is_experiment
return data
def find_type(self):
# If the extension declares a type that we know about, use
# that.
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
self.package_type = self.find('type')
if self.package_type and self.package_type in self.TYPES:
# If it's an experiment, we need to store that for later.
self.is_experiment = self.package_type in self.EXPERIMENT_TYPES
return self.TYPES[self.package_type]
# Look for dictionaries.
is_dictionary = (
self.zip_file.exists('dictionaries/') and
any(fname.endswith('.dic') for fname in self.zip_file.namelist())
)
if is_dictionary:
return amo.ADDON_DICT
# Consult <em:type>.
return self.TYPES.get(self.package_type, amo.ADDON_EXTENSION)
def uri(self, name):
namespace = 'http://www.mozilla.org/2004/em-rdf'
return rdflib.term.URIRef('%s#%s' % (namespace, name))
def find_root(self):
# If the install-manifest root is well-defined, it'll show up when we
# search for triples with it. If not, we have to find the context that
# defines the manifest and use that as our root.
# http://www.w3.org/TR/rdf-concepts/#section-triples
manifest = rdflib.term.URIRef(self.manifest)
if list(self.rdf.triples((manifest, None, None))):
self.root = manifest
else:
self.root = next(self.rdf.subjects(None, self.manifest))
def find(self, name, ctx=None):
"""Like $() for install.rdf, where name is the selector."""
if ctx is None:
ctx = self.root
# The predicate maps to <em:{name}>.
match = list(self.rdf.objects(ctx, predicate=self.uri(name)))
# These come back as rdflib.Literal, which subclasses unicode.
if match:
return str(match[0])
def apps(self):
rv = []
seen_apps = set()
for ctx in self.rdf.objects(None, self.uri('targetApplication')):
app = amo.APP_GUIDS.get(self.find('id', ctx))
if not app:
continue
if app.guid not in amo.APP_GUIDS or app.id in seen_apps:
continue
if app not in amo.APP_USAGE:
# Ignore non-firefoxes compatibility.
continue
seen_apps.add(app.id)
try:
min_appver_text = self.find('minVersion', ctx)
max_appver_text = self.find('maxVersion', ctx)
# Rewrite '*' as '56.*' in legacy extensions, since they
# are not compatible with higher versions.
# We don't do that for legacy add-ons that are already
# signed by Mozilla to allow them for Firefox 57 onwards.
needs_max_56_star = (
app.id in (amo.FIREFOX.id, amo.ANDROID.id) and
max_appver_text == '*' and
not (self.certinfo and self.certinfo.is_mozilla_signed_ou)
)
if needs_max_56_star:
max_appver_text = '56.*'
min_appver, max_appver = get_appversions(
app, min_appver_text, max_appver_text)
except AppVersion.DoesNotExist:
continue
rv.append(Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver))
return rv
class ManifestJSONExtractor(object):
def __init__(self, zip_file, data='', certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
if not data:
data = zip_file.read('manifest.json')
# Remove BOM if present.
data = unicodehelper.decode(data)
# Run through the JSON and remove all comments, then try to read
# the manifest file.
# Note that Firefox and the WebExtension spec only allow for
# line comments (starting with `//`), not block comments (starting
# with `/*`). We strip out both here regardless; the linter flags
# block-level comments explicitly as an error, so the developer can
# change them to line-level comments.
json_string = ''
lexer = JsLexer()
for name, token in lexer.lex(data):
if name not in ('blockcomment', 'linecomment'):
json_string += token
try:
self.data = json.loads(json_string)
except Exception:
raise InvalidManifest(
ugettext('Could not parse the manifest file.'))
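# Illustrative sketch, not part of this module: the JsLexer pass above in
# isolation, on a hypothetical manifest with a line comment:
#
# raw = '{\n  // developer note\n  "manifest_version": 2\n}'
# stripped = ''.join(
#     token for name, token in JsLexer().lex(raw)
#     if name not in ('blockcomment', 'linecomment'))
# json.loads(stripped)  # -> {'manifest_version': 2}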
def get(self, key, default=None):
return self.data.get(key, default)
@property
def is_experiment(self):
"""Return whether or not the webextension uses
experiments or theme experiments API.
In legacy extensions this is a different type, but for webextensions
we just look at the manifest."""
experiment_keys = ('experiment_apis', 'theme_experiment')
return any(bool(self.get(key)) for key in experiment_keys)
@property
def gecko(self):
"""Return the "applications|browser_specific_settings["gecko"]" part
of the manifest."""
parent_block = self.get(
'browser_specific_settings', self.get('applications', {}))
return parent_block.get('gecko', {})
@property
def guid(self):
return self.gecko.get('id', None)
@property
def type(self):
return (
amo.ADDON_LPAPP if 'langpack_id' in self.data
else amo.ADDON_STATICTHEME if 'theme' in self.data
else amo.ADDON_DICT if 'dictionaries' in self.data
else amo.ADDON_EXTENSION
)
@property
def strict_max_version(self):
return get_simple_version(self.gecko.get('strict_max_version'))
@property
def strict_min_version(self):
return get_simple_version(self.gecko.get('strict_min_version'))
def apps(self):
"""Get `AppVersion`s for the application."""
type_ = self.type
if type_ == amo.ADDON_LPAPP:
# Langpack are only compatible with Firefox desktop at the moment.
# https://github.com/mozilla/addons-server/issues/8381
# They are all strictly compatible with a specific version, so
# the default min version here doesn't matter much.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_MIN_VERSION),
)
elif type_ == amo.ADDON_STATICTHEME:
# Static themes are only compatible with Firefox desktop >= 53
# and Firefox for Android >=65.
apps = (
(amo.FIREFOX, amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX),
(amo.ANDROID, amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID),
)
elif type_ == amo.ADDON_DICT:
# WebExt dicts are only compatible with Firefox desktop >= 61.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_DICT_MIN_VERSION_FIREFOX),
)
else:
webext_min = (
amo.DEFAULT_WEBEXT_MIN_VERSION
if self.get('browser_specific_settings', None) is None
else amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
# amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC should be 48.0,
# which is the same as amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID, so
# no specific treatment for Android.
apps = (
(amo.FIREFOX, webext_min),
(amo.ANDROID, amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID),
)
doesnt_support_no_id = (
self.strict_min_version and
(vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID))
)
if self.guid is None and doesnt_support_no_id:
raise forms.ValidationError(
ugettext('Add-on ID is required for Firefox 47 and below.')
)
# If a minimum strict version is specified, it needs to be higher
# than the version when Firefox started supporting WebExtensions.
unsupported_no_matter_what = (
self.strict_min_version and vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION))
if unsupported_no_matter_what:
msg = ugettext('Lowest supported "strict_min_version" is 42.0.')
raise forms.ValidationError(msg)
for app, default_min_version in apps:
if self.guid is None and not self.strict_min_version:
strict_min_version = max(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID,
default_min_version)
else:
# strict_min_version for this app shouldn't be lower than the
# default min version for this app.
strict_min_version = max(
self.strict_min_version, default_min_version)
strict_max_version = (
self.strict_max_version or amo.DEFAULT_WEBEXT_MAX_VERSION)
if vint(strict_max_version) < vint(strict_min_version):
strict_max_version = strict_min_version
qs = AppVersion.objects.filter(application=app.id)
try:
min_appver = qs.get(version=strict_min_version)
except AppVersion.DoesNotExist:
# If the specified strict_min_version can't be found, raise an
# error, we can't guess an appropriate one.
msg = ugettext(
u'Unknown "strict_min_version" {appver} for {app}'.format(
app=app.pretty, appver=strict_min_version))
raise forms.ValidationError(msg)
try:
max_appver = qs.get(version=strict_max_version)
except AppVersion.DoesNotExist:
# If the specified strict_max_version can't be found, this is
# less of a problem, ignore and replace with '*'.
# https://github.com/mozilla/addons-server/issues/7160
max_appver = qs.get(version=amo.DEFAULT_WEBEXT_MAX_VERSION)
yield Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver)
def target_locale(self):
"""Guess target_locale for a dictionary from manifest contents."""
try:
dictionaries = self.get('dictionaries', {})
key = force_text(list(dictionaries.keys())[0])
return key[:255]
except (IndexError, UnicodeDecodeError):
# This shouldn't happen: the linter should prevent it, but
# just in case, handle the error (without bothering with
# translations as users should never see this).
raise forms.ValidationError('Invalid dictionaries object.')
def parse(self, minimal=False):
data = {
'guid': self.guid,
'type': self.type,
'version': self.get('version', ''),
'is_webextension': True,
'name': self.get('name'),
'summary': self.get('description'),
'homepage': self.get('homepage_url'),
'default_locale': self.get('default_locale'),
}
# Populate certificate information (e.g signed by mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if self.type == amo.ADDON_STATICTHEME:
data['theme'] = self.get('theme', {})
if not minimal:
data.update({
'is_restart_required': False,
'apps': list(self.apps()),
# Langpacks have strict compatibility enabled, rest of
# webextensions don't.
'strict_compatibility': data['type'] == amo.ADDON_LPAPP,
'is_experiment': self.is_experiment,
})
if self.type == amo.ADDON_EXTENSION:
# Only extensions have permissions and content scripts
data.update({
'optional_permissions':
self.get('optional_permissions', []),
'permissions': self.get('permissions', []),
'content_scripts': self.get('content_scripts', []),
})
if self.get('devtools_page'):
data.update({
'devtools_page': self.get('devtools_page')
})
elif self.type == amo.ADDON_DICT:
data['target_locale'] = self.target_locale()
return data
class SigningCertificateInformation(object):
"""Process the signature to determine the addon is a Mozilla Signed
extension, so is signed already with a special certificate. We want to
know this so we don't write over it later, and stop unauthorised people
from submitting them to AMO."""
def __init__(self, certificate_data):
pkcs7 = certificate_data
self.cert_ou = get_signer_organizational_unit_name(pkcs7)
@property
def is_mozilla_signed_ou(self):
return self.cert_ou == 'Mozilla Extensions'
def parse(self):
return {'is_mozilla_signed_extension': self.is_mozilla_signed_ou}
def extract_search(content):
def _text(tag):
try:
return dom.getElementsByTagName(tag)[0].childNodes[0].wholeText
except (IndexError, AttributeError):
raise forms.ValidationError(
ugettext('Could not parse uploaded file, missing or empty '
'<%s> element') % tag)
# Only catch basic errors, we don't accept any new uploads and validation
# has happened on upload in the past.
try:
dom = minidom.parse(content)
except DefusedXmlException:
raise forms.ValidationError(
ugettext('OpenSearch: XML Security error.'))
except ExpatError:
raise forms.ValidationError(ugettext('OpenSearch: XML Parse Error.'))
return {
'name': _text('ShortName'),
'description': _text('Description')
}
def parse_search(fileorpath, addon=None):
try:
f = get_file(fileorpath)
data = extract_search(f)
except forms.ValidationError:
raise
except Exception:
log.error('OpenSearch parse error', exc_info=True)
raise forms.ValidationError(ugettext('Could not parse uploaded file.'))
return {'guid': None,
'type': amo.ADDON_SEARCH,
'name': data['name'],
'is_restart_required': False,
'is_webextension': False,
'summary': data['description'],
'version': datetime.now().strftime('%Y%m%d')}
class FSyncMixin(object):
"""Mixin that implements fsync for file extractions.
This mixin uses the `_extract_member` interface used by `zipfile` and
`tarfile`, so it's somewhat universal.
We need this to make sure that on EFS / NFS all data is immediately
written to avoid any data loss on the way.
"""
def _fsync_dir(self, path):
descriptor = os.open(path, os.O_DIRECTORY)
try:
os.fsync(descriptor)
except OSError as exc:
# On some filesystem doing a fsync on a directory
# raises an EINVAL error. Ignoring it is usually safe.
if exc.errno != errno.EINVAL:
raise
os.close(descriptor)
def _fsync_file(self, path):
descriptor = os.open(path, os.O_RDONLY)
os.fsync(descriptor)
os.close(descriptor)
def _extract_member(self, member, targetpath, *args, **kwargs):
"""Extends `ZipFile._extract_member` to call fsync().
For every extracted file we ensure that its data has been
written to disk. We do this to avoid any data inconsistencies
that we have seen in the past.
To do this correctly we fsync() all parent directories as well;
only that ensures a durable write for that specific file.
This is inspired by https://github.com/2ndquadrant-it/barman/
(see backup.py -> backup_fsync_and_set_sizes and utils.py)
"""
super(FSyncMixin, self)._extract_member(
member, targetpath, *args, **kwargs)
parent_dir = os.path.dirname(os.path.normpath(targetpath))
if parent_dir:
self._fsync_dir(parent_dir)
self._fsync_file(targetpath)
class FSyncedZipFile(FSyncMixin, zipfile.ZipFile):
"""Subclass of ZipFile that calls `fsync` for file extractions."""
pass
class FSyncedTarFile(FSyncMixin, tarfile.TarFile):
"""Subclass of TarFile that calls `fsync` for file extractions."""
pass
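# Illustrative usage sketch, not part of this module: callers opt into the
# fsync()ing variants via a force_fsync flag, e.g.
#
# SafeZip(source_file, force_fsync=True)          # uses FSyncedZipFile
# FSyncedTarFile.open(source).extractall(target)  # as in
#                                                 # extract_extension_to_dest()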
def archive_member_validator(archive, member):
"""Validate a member of an archive member (TarInfo or ZipInfo)."""
filename = getattr(member, 'filename', getattr(member, 'name', None))
filesize = getattr(member, 'file_size', getattr(member, 'size', None))
_validate_archive_member_name_and_size(filename, filesize)
def _validate_archive_member_name_and_size(filename, filesize):
if filename is None or filesize is None:
raise forms.ValidationError(ugettext('Unsupported archive type.'))
try:
force_text(filename)
except UnicodeDecodeError:
# We can't log the filename unfortunately since it's encoding
# is obviously broken :-/
log.error('Extraction error, invalid file name encoding')
msg = ugettext('Invalid file name in archive. Please make sure '
'all filenames are utf-8 or latin1 encoded.')
raise forms.ValidationError(msg)
if '../' in filename or '..' == filename or filename.startswith('/'):
log.error('Extraction error, invalid file name: %s' % (filename))
# L10n: {0} is the name of the invalid file.
msg = ugettext('Invalid file name in archive: {0}')
raise forms.ValidationError(msg.format(filename))
if filesize > settings.FILE_UNZIP_SIZE_LIMIT:
log.error('Extraction error, file too big for file (%s): '
'%s' % (filename, filesize))
# L10n: {0} is the name of the invalid file.
msg = ugettext('File exceeding size limit in archive: {0}')
raise forms.ValidationError(msg.format(filename))
class SafeZip(object):
def __init__(self, source, mode='r', force_fsync=False):
self.source = source
self.info_list = None
self.mode = mode
self.force_fsync = force_fsync
self.initialize_and_validate()
def initialize_and_validate(self):
"""
Runs some overall archive checks.
"""
if self.force_fsync:
zip_file = FSyncedZipFile(self.source, self.mode)
else:
zip_file = zipfile.ZipFile(self.source, self.mode)
info_list = zip_file.infolist()
total_file_size = 0
for info in info_list:
total_file_size += info.file_size
archive_member_validator(self.source, info)
if total_file_size >= settings.MAX_ZIP_UNCOMPRESSED_SIZE:
raise forms.ValidationError(ugettext(
'Uncompressed size is too large'))
self.info_list = info_list
self.zip_file = zip_file
def is_signed(self):
"""Tells us if an addon is signed."""
finds = []
for info in self.info_list:
match = SIGNED_RE.match(info.filename)
if match:
name, ext = match.groups()
# If it's rsa or sf, just look for the opposite.
if (name, {'rsa': 'sf', 'sf': 'rsa'}[ext]) in finds:
return True
finds.append((name, ext))
def extract_from_manifest(self, manifest):
"""
Extracts a file given a manifest such as:
jar:chrome/de.jar!/locale/de/browser/
or
locale/de/browser
"""
type, path = manifest.split(':')
jar = self
if type == 'jar':
parts = path.split('!')
for part in parts[:-1]:
jar = self.__class__(io.BytesIO(jar.zip_file.read(part)))
path = parts[-1]
return jar.read(path[1:] if path.startswith('/') else path)
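# Illustrative sketch, not part of this module: resolving a 'jar:' manifest
# entry like the docstring example above. The paths are hypothetical:
#
# zip.extract_from_manifest('jar:chrome/de.jar!/locale/de/browser/foo.dtd')
# # 1. type == 'jar', path == 'chrome/de.jar!/locale/de/browser/foo.dtd'
# # 2. a nested SafeZip is opened from the bytes of 'chrome/de.jar'
# # 3. 'locale/de/browser/foo.dtd' is read from that nested archive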
def extract_info_to_dest(self, info, dest):
"""Extracts the given info to a directory and checks the file size."""
self.zip_file.extract(info, dest)
dest = os.path.join(dest, info.filename)
if not os.path.isdir(dest):
# Directories consistently report their size incorrectly.
size = os.stat(dest)[stat.ST_SIZE]
if size != info.file_size:
log.error('Extraction error, uncompressed size: %s, %s not %s'
% (self.source, size, info.file_size))
raise forms.ValidationError(ugettext('Invalid archive.'))
def extract_to_dest(self, dest):
"""Extracts the zip file to a directory."""
for info in self.info_list:
self.extract_info_to_dest(info, dest)
def close(self):
self.zip_file.close()
@property
def filelist(self):
return self.zip_file.filelist
@property
def namelist(self):
return self.zip_file.namelist
def exists(self, path):
try:
return self.zip_file.getinfo(path)
except KeyError:
return False
def read(self, path):
return self.zip_file.read(path)
def extract_zip(source, remove=False, force_fsync=False, tempdir=None):
"""Extracts the zip file. If remove is given, removes the source file."""
if tempdir is None:
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
try:
zip_file = SafeZip(source, force_fsync=force_fsync)
zip_file.extract_to_dest(tempdir)
except Exception:
rm_local_tmp_dir(tempdir)
raise
if remove:
os.remove(source)
return tempdir
def extract_extension_to_dest(source, dest=None, force_fsync=False):
"""Extract `source` to `dest`.
`source` can be an extension or extension source, can be a zip, tar
(gzip, bzip) or a search provider (.xml file).
Note that this doesn't verify the contents of `source` except for
that it requires something valid to be extracted.
:returns: Extraction target directory, if `dest` is `None` it'll be a
temporary directory.
:raises FileNotFoundError: if the source file is not found on the filesystem
:raises forms.ValidationError: if the zip is invalid
"""
target, tempdir = None, None
if dest is None:
target = tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
else:
target = dest
try:
source = force_text(source)
if source.endswith((u'.zip', u'.xpi')):
with open(source, 'rb') as source_file:
zip_file = SafeZip(source_file, force_fsync=force_fsync)
zip_file.extract_to_dest(target)
elif source.endswith((u'.tar.gz', u'.tar.bz2', u'.tgz')):
tarfile_class = (
tarfile.TarFile
if not force_fsync else FSyncedTarFile)
with tarfile_class.open(source) as archive:
archive.extractall(target)
elif source.endswith(u'.xml'):
shutil.copy(source, target)
if force_fsync:
FSyncMixin()._fsync_file(target)
except (zipfile.BadZipFile, tarfile.ReadError, IOError,
forms.ValidationError) as e:
if tempdir is not None:
rm_local_tmp_dir(tempdir)
if isinstance(e, (FileNotFoundError, forms.ValidationError)):
# We let FileNotFoundError (which are a subclass of IOError, or
# rather OSError but that's an alias) and ValidationError be
# raised, the caller will have to deal with it.
raise
# Any other exceptions we caught, we raise a generic ValidationError
# instead.
raise forms.ValidationError(
ugettext('Invalid or broken archive.'))
return target
def copy_over(source, dest):
"""
Copies from the source to the destination, removing the destination
if it exists and is a directory.
"""
if os.path.exists(dest) and os.path.isdir(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
# mkdtemp will set the directory permissions to 700
# for the webserver to read them, we need 755
os.chmod(dest, stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
shutil.rmtree(source)
def get_all_files(folder, strip_prefix='', prefix=None):
"""Return all files in a file/directory tree.
:param folder: The folder of which to return the file-tree.
:param str strip_prefix: A string to strip in case we're adding a custom
`prefix`. Doesn't have any effect if
`prefix` isn't given.
:param prefix: A custom prefix to add to all files and folders.
"""
all_files = []
# Not using os.path.walk so we get just the right order.
def iterate(path):
path_dirs, path_files = storage.listdir(path)
for dirname in sorted(path_dirs):
full = os.path.join(path, force_text(dirname))
all_files.append(full)
iterate(full)
for filename in sorted(path_files):
full = os.path.join(path, force_text(filename))
all_files.append(full)
iterate(folder)
if prefix is not None:
# Strip the temporary prefix, e.g. /tmp/, and prepend the custom prefix.
all_files = [
os.path.join(prefix, fname[len(strip_prefix) + 1:])
for fname in all_files]
return all_files
def extract_xpi(xpi, path):
"""Extract all files from `xpi` to `path`.
This can be removed in favour of our already extracted git-repositories
once we have landed and tested them in production.
"""
tempdir = extract_zip(xpi)
all_files = get_all_files(tempdir)
copy_over(tempdir, path)
return all_files
def parse_xpi(xpi, addon=None, minimal=False, user=None):
"""Extract and parse an XPI. Returns a dict with various properties
describing the xpi.
Will raise ValidationError if something went wrong while parsing.
If minimal is True, it avoids validation as much as possible (still raising
ValidationError for hard errors like I/O or invalid json/rdf) and returns
only the minimal set of properties needed to decide what to do with the
add-on: guid, version and is_webextension.
"""
try:
xpi = get_file(xpi)
xpi_info = Extractor.parse(xpi, minimal=minimal)
except forms.ValidationError:
raise
except IOError as e:
if len(e.args) < 2:
err, strerror = None, e.args[0]
else:
err, strerror = e.args
log.error('I/O error({0}): {1}'.format(err, strerror))
# Note: we don't really know what happened, so even though we return a
# generic message about the manifest, don't raise InvalidManifest. We
# want the validation to stop there.
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
except Exception:
# As above, don't raise InvalidManifest here.
log.error('XPI parse error', exc_info=True)
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
if minimal:
return xpi_info
return check_xpi_info(xpi_info, addon, xpi, user=user)
def check_xpi_info(xpi_info, addon=None, xpi_file=None, user=None):
from olympia.addons.models import Addon, DeniedGuid
guid = xpi_info['guid']
is_webextension = xpi_info.get('is_webextension', False)
# If we allow the guid to be omitted we assume that one was generated
# or existed before and use that one.
# An example are WebExtensions that don't require a guid but we generate
# one once they're uploaded. Now, if you update that WebExtension we
# just use the original guid.
if addon and not guid and is_webextension:
xpi_info['guid'] = guid = addon.guid
if not guid and not is_webextension:
raise forms.ValidationError(ugettext('Could not find an add-on ID.'))
if guid:
if user:
deleted_guid_clashes = Addon.unfiltered.exclude(
authors__id=user.id).filter(guid=guid)
else:
deleted_guid_clashes = Addon.unfiltered.filter(guid=guid)
if addon and addon.guid != guid:
msg = ugettext(
'The add-on ID in your manifest.json or install.rdf (%s) '
'does not match the ID of your add-on on AMO (%s)')
raise forms.ValidationError(msg % (guid, addon.guid))
if (not addon and
# Non-deleted add-ons.
(Addon.objects.filter(guid=guid).exists() or
# DeniedGuid objects for deletions for Mozilla disabled add-ons
DeniedGuid.objects.filter(guid=guid).exists() or
# Deleted add-ons that don't belong to the uploader.
deleted_guid_clashes.exists())):
raise forms.ValidationError(ugettext('Duplicate add-on ID found.'))
if len(xpi_info['version']) > 32:
raise forms.ValidationError(
ugettext('Version numbers should have fewer than 32 characters.'))
if not VERSION_RE.match(xpi_info['version']):
raise forms.ValidationError(
ugettext('Version numbers should only contain letters, numbers, '
'and these punctuation characters: +*.-_.'))
if is_webextension and xpi_info.get('type') == amo.ADDON_STATICTHEME:
max_size = settings.MAX_STATICTHEME_SIZE
if xpi_file and os.path.getsize(xpi_file.name) > max_size:
raise forms.ValidationError(
ugettext(u'Maximum size for WebExtension themes is {0}.')
.format(filesizeformat(max_size)))
if xpi_file:
# Make sure we pass in a copy of `xpi_info` since
# `resolve_webext_translations` modifies data in-place
translations = Addon.resolve_webext_translations(
xpi_info.copy(), xpi_file)
verify_mozilla_trademark(translations['name'], user)
# Parse the file to get and validate package data with the addon.
if not acl.experiments_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit this type of add-on'))
if not addon and not system_addon_submission_allowed(
user, xpi_info):
guids = ' or '.join(
'"' + guid + '"' for guid in amo.SYSTEM_ADDON_GUIDS)
raise forms.ValidationError(
ugettext('You cannot submit an add-on using an ID ending with '
'%s' % guids))
if not mozilla_signed_extension_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit a Mozilla Signed Extension'))
if not acl.langpack_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext('You cannot submit a language pack'))
return xpi_info
def parse_addon(pkg, addon=None, user=None, minimal=False):
"""
Extract and parse a file path, UploadedFile or FileUpload. Returns a dict
with various properties describing the add-on.
Will raise ValidationError if something went wrong while parsing.
`addon` parameter is mandatory if the file being parsed is going to be
attached to an existing Addon instance.
`user` parameter is mandatory unless minimal `parameter` is True. It should
point to the UserProfile responsible for the upload.
If `minimal` parameter is True, it avoids validation as much as possible
(still raising ValidationError for hard errors like I/O or invalid
json/rdf) and returns only the minimal set of properties needed to decide
what to do with the add-on (the exact set depends on the add-on type, but
it should always contain at least guid, type, version and is_webextension).
"""
name = getattr(pkg, 'name', pkg)
if name.endswith('.xml'):
parsed = parse_search(pkg, addon)
elif name.endswith(amo.VALID_ADDON_FILE_EXTENSIONS):
parsed = parse_xpi(pkg, addon, minimal=minimal, user=user)
else:
valid_extensions_string = u'(%s)' % u', '.join(
amo.VALID_ADDON_FILE_EXTENSIONS)
raise UnsupportedFileType(
ugettext(
'Unsupported file type, please upload a supported '
'file {extensions}.'.format(
extensions=valid_extensions_string)))
if not minimal:
if user is None:
# This should never happen and means there is a bug in
# addons-server itself.
raise forms.ValidationError(ugettext('Unexpected error.'))
# FIXME: do the checks depending on user here.
if addon and addon.type != parsed['type']:
msg = ugettext(
'The type (%s) does not match the type of your add-on on '
'AMO (%s)')
raise forms.ValidationError(msg % (parsed['type'], addon.type))
return parsed
def get_sha256(file_obj, block_size=io.DEFAULT_BUFFER_SIZE):
"""Calculate a sha256 hash for `file_obj`.
`file_obj` must be an open file descriptor. The caller needs to take
care of closing it properly.
"""
hash_ = hashlib.sha256()
for chunk in iter(lambda: file_obj.read(block_size), b''):
hash_.update(chunk)
return hash_.hexdigest()
def update_version_number(file_obj, new_version_number):
"""Update the manifest to have the new version number."""
# Create a new xpi with the updated version.
updated = u'{0}.updated_version_number'.format(file_obj.file_path)
# Copy the original XPI, with the updated install.rdf or package.json.
with zipfile.ZipFile(file_obj.file_path, 'r') as source:
file_list = source.infolist()
with zipfile.ZipFile(updated, 'w', zipfile.ZIP_DEFLATED) as dest:
for file_ in file_list:
content = source.read(file_.filename)
if file_.filename == 'manifest.json':
content = _update_version_in_json_manifest(
content, new_version_number)
dest.writestr(file_, content)
# Move the updated file to the original file.
shutil.move(updated, file_obj.file_path)
def write_crx_as_xpi(chunks, target):
"""Extract and strip the header from the CRX, convert it to a regular ZIP
archive, then write it to `target`. Read more about the CRX file format:
https://developer.chrome.com/extensions/crx
"""
# First we open the uploaded CRX so we can see how much we need
# to trim from the header of the file to make it a valid ZIP.
with tempfile.NamedTemporaryFile('w+b', dir=settings.TMP_PATH) as tmp:
for chunk in chunks:
tmp.write(chunk)
tmp.seek(0)
header = tmp.read(16)
header_info = struct.unpack('4cHxII', header)
public_key_length = header_info[5]
signature_length = header_info[6]
# This is how far forward we need to seek to extract only a
# ZIP file from this CRX.
start_position = 16 + public_key_length + signature_length
hash = hashlib.sha256()
tmp.seek(start_position)
# Now we open the Django storage and write our real XPI file.
with storage.open(target, 'wb') as file_destination:
bytes = tmp.read(65536)
# Keep reading bytes and writing them to the XPI.
while bytes:
hash.update(bytes)
file_destination.write(bytes)
bytes = tmp.read(65536)
return hash
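# Illustrative sketch, not part of this module: the 16-byte CRX header
# consumed above, as laid out by struct.unpack('4cHxII', header):
#
# bytes 0-3    magic number ('4c', 'Cr24' for CRX files)
# bytes 4-7    format version ('H' plus pad/alignment bytes)
# bytes 8-11   public key length ('I', header_info[5])
# bytes 12-15  signature length ('I', header_info[6])
#
# The plain ZIP payload then starts at offset
# 16 + public_key_length + signature_length.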
def _update_version_in_json_manifest(content, new_version_number):
"""Change the version number in the json manifest file provided."""
updated = json.loads(content)
if 'version' in updated:
updated['version'] = new_version_number
return json.dumps(updated)
def extract_translations(file_obj):
"""Extract all translation messages from `file_obj`.
:param locale: if not `None` the list will be restricted only to `locale`.
"""
xpi = get_filepath(file_obj)
messages = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
file_list = source.namelist()
# Fetch all locales the add-on supports
# see https://developer.chrome.com/extensions/i18n#overview-locales
# for more details on the format.
locales = {
name.split('/')[1] for name in file_list
if name.startswith('_locales/') and
name.endswith('/messages.json')}
for locale in locales:
corrected_locale = find_language(locale)
# Filter out languages we don't support.
if not corrected_locale:
continue
fname = '_locales/{0}/messages.json'.format(locale)
try:
data = source.read(fname)
messages[corrected_locale] = decode_json(data)
except (ValueError, KeyError):
# `ValueError` thrown by `decode_json` if the json is
# invalid and `KeyError` thrown by `source.read`
# usually means the file doesn't exist for some reason,
# we fail silently
continue
except IOError:
pass
return messages
def resolve_i18n_message(message, messages, locale, default_locale=None):
"""Resolve a translatable string in an add-on.
This matches ``__MSG_extensionName__`` like names and returns the correct
translation for `locale`.
    :param locale: The locale to fetch the translation for; if no translation
        exists for `locale`, `default_locale` is used as a fallback.
:param messages: A dictionary of messages, e.g the return value
of `extract_translations`.
"""
if not message or not isinstance(message, str):
# Don't even attempt to extract invalid data.
# See https://github.com/mozilla/addons-server/issues/3067
# for more details
return message
match = MSG_RE.match(message)
if match is None:
return message
locale = find_language(locale)
if default_locale:
default_locale = find_language(default_locale)
msgid = match.group('msgid')
default = {'message': message}
if locale in messages:
message = messages[locale].get(msgid, default)
elif default_locale in messages:
message = messages[default_locale].get(msgid, default)
if not isinstance(message, dict):
# Fallback for invalid message format, should be caught by
# addons-linter in the future but we'll have to handle it.
# See https://github.com/mozilla/addons-server/issues/3485
return default['message']
return message['message']
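# Illustrative usage sketch (not part of the original module), reusing the
# catalog shape shown above for extract_translations().
def _example_resolve_message():
    messages = {
        'de': {'extensionName': {'message': 'Meine Erweiterung'}},
        'en-US': {'extensionName': {'message': 'My Add-on'}},
    }
    # '__MSG_extensionName__' matches MSG_RE, so the msgid is looked up in
    # the 'de' catalog (assuming find_language() maps 'de' to itself) and
    # 'Meine Erweiterung' comes back; a plain string without the
    # __MSG_...__ wrapper would be returned unchanged.
    return resolve_i18n_message(
        '__MSG_extensionName__', messages, 'de', default_locale='en-US')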
def get_background_images(file_obj, theme_data, header_only=False):
"""Extract static theme header image from `file_obj` and return in dict."""
xpi = get_filepath(file_obj)
if not theme_data:
# we might already have theme_data, but otherwise get it from the xpi.
try:
parsed_data = parse_xpi(xpi, minimal=True)
theme_data = parsed_data.get('theme', {})
except forms.ValidationError:
# If we can't parse the existing manifest safely return.
return {}
images_dict = theme_data.get('images', {})
# Get the reference in the manifest. headerURL is the deprecated variant.
header_url = images_dict.get(
'theme_frame', images_dict.get('headerURL'))
# And any additional backgrounds too.
additional_urls = (
images_dict.get('additional_backgrounds', []) if not header_only
else [])
image_urls = [header_url] + additional_urls
images = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
for url in image_urls:
_, file_ext = os.path.splitext(str(url).lower())
if file_ext not in amo.THEME_BACKGROUND_EXTS:
# Just extract image files.
continue
try:
images[url] = source.read(url)
except KeyError:
pass
except IOError as ioerror:
log.info(ioerror)
return images
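# Hedged sketch (not part of the original module): the static theme manifest
# fragment get_background_images() reads, using 'theme_frame' (or its
# deprecated 'headerURL' spelling) plus optional additional backgrounds.
_EXAMPLE_THEME_DATA = {
    'images': {
        'theme_frame': 'background.png',
        'additional_backgrounds': ['extra1.png', 'extra2.png'],
    },
}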
@contextlib.contextmanager
def run_with_timeout(seconds):
"""Implement timeouts via `signal`.
This is being used to implement timeout handling when acquiring locks.
"""
def timeout_handler(signum, frame):
"""
        Since Python 3.5, interrupted `fcntl` calls are retried
        automatically, so we need an exception to stop them. The exception
        only interrupts the main thread, so make sure `flock` is called
        there.
"""
raise TimeoutError
original_handler = signal.signal(signal.SIGALRM, timeout_handler)
try:
signal.alarm(seconds)
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, original_handler)
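# Illustrative usage sketch (not part of the original module). SIGALRM-based
# timeouts only work in the main thread, which is why the handler's
# docstring above insists that `flock` is called there.
def _example_timed_section(seconds=5):
    try:
        with run_with_timeout(seconds):
            pass  # work running longer than `seconds` raises TimeoutError
    except TimeoutError:
        return False
    return True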
@contextlib.contextmanager
def lock(lock_dir, lock_name, timeout=6):
"""A wrapper around fcntl to be used as a context manager.
Additionally this helper allows the caller to wait for a lock for a certain
amount of time.
Example::
with lock(settings.TMP_PATH, 'extraction-1234'):
extract_xpi(...)
The lock is properly released at the end of the context block.
This locking mechanism should work perfectly fine with NFS v4 and EFS
(which uses the NFS v4.1 protocol).
:param timeout: Timeout for how long we expect to wait for a lock in
seconds. If 0 the function returns immediately, otherwise
it blocks the execution.
:return: `True` if the lock was attained, we are owning the lock,
`False` if there is an already existing lock.
"""
lock_name = f'{lock_name}.lock'
log.info(f'Acquiring lock {lock_name}.')
lock_path = os.path.join(lock_dir, lock_name)
with open(lock_path, 'w') as lockfd:
lockfd.write(f'{os.getpid()}')
fileno = lockfd.fileno()
try:
with run_with_timeout(timeout):
fcntl.flock(fileno, fcntl.LOCK_EX)
except (BlockingIOError, TimeoutError):
# Another process already holds the lock.
            # In theory we'd always catch `TimeoutError` here, but for
            # the sake of completeness let's catch `BlockingIOError` too,
            # to be on the safe side.
yield False
else:
# We successfully acquired the lock.
yield True
finally:
# Always release the lock after the parent context
            # block has finished.
log.info(f'Releasing lock {lock_name}.')
fcntl.flock(fileno, fcntl.LOCK_UN)
lockfd.close()
try:
os.unlink(lock_path)
except FileNotFoundError:
pass
| bsd-3-clause |
jtara1/RedditImageGrab | redditdownload/plugins/parse_subreddit_list.py | 1 | 2749 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 15:26:13 2016
@author: jtara1
General syntax for subreddits.txt:
: (colon character) denotes folder name
subreddit url or word denotes subreddit
For more examples see https://github.com/jtara1/RedditImageGrab/commit/8e4787ef9ac43ca694fc663be026f69a568bb622
Example of expected input and output:
subreddits.txt = "
pc-wallpapers:
https://www.reddit.com/r/wallpapers/
/r/BackgroundArt/
nature_pics:
http://www.reddit.com/r/EarthPorn/
:
Mountain
"
parse_subreddit_list('/MyPath/subreddits.txt', '/MyPath/') = [
('wallpapers', '/MyPath/pc-wallpapers/wallpapers'),
('BackgroundArt', '/MyPath/pc-wallpapers/BackgroundArt'),
('EarthPorn', '/MyPath/nature_pics/EarthPorn'),
('Mountain', '/MyPath/Mountain')
]
"""
import re
import os
from os import getcwd, mkdir
def parse_subreddit_list(file_path, base_path=getcwd()):
"""Gets list of subreddits from a file & returns folder for media from each subreddit
:param file_path: path of text file to load subreddits from (relative or full path)
:param base_path: base path that gets returned with each subreddit
:return: list containing tuples of subreddit & its associated folder to get media saved to
:rtype: list
"""
try:
file = open(file_path, 'r')
except IOError as e:
print(e)
        raise
output = []
    folder_regex = re.compile(r'([a-zA-Z0-9_\- ]*):\n')
    subreddit_regex = re.compile(r'(?:https?://)?(?:www\.)?reddit\.com/r/([a-zA-Z0-9_]*)')
    subreddit_regex2 = re.compile(r'(?:/r/)?([a-zA-Z0-9_]*)')
if not os.path.isdir(base_path):
mkdir(base_path)
# iterate through the lines using regex to check if line is subreddit or folder title
path = base_path
for line in file:
if line == '\n':
continue
folder_match = re.match(folder_regex, line)
if folder_match:
if folder_match.group(1) != '':
path = os.path.join(base_path, line[:-2])
if not os.path.isdir(path):
mkdir(path)
else:
path = base_path
continue
subreddit_match = re.match(subreddit_regex, line)
if not subreddit_match:
subreddit_match = re.match(subreddit_regex2, line)
if not subreddit_match:
                print('No match at position %s' % file.tell())
print('parse_subreddit_list Error: No match found, skipping this iteration.')
continue
subreddit = subreddit_match.group(1)
final_path = os.path.join(path, subreddit)
if not os.path.isdir(final_path):
mkdir(final_path)
output.append((subreddit, final_path))
return output
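# Illustrative usage sketch (not part of the original module), mirroring the
# expected input and output documented in the module docstring above.
def _example_parse():
    # Each returned tuple pairs a subreddit name with the directory its
    # media is saved to; missing directories are created as a side effect.
    for subreddit, folder in parse_subreddit_list('subreddits.txt', getcwd()):
        print('%s -> %s' % (subreddit, folder))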
| gpl-3.0 |
ibethune/lammps | tools/i-pi/ipi/inputs/normalmodes.py | 41 | 3951 | """Deals with creating the normal mode representation arrays.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputNormalModes: Deals with creating the normal mode objects.
"""
import numpy as np
from copy import copy
from ipi.engine.normalmodes import *
from ipi.utils.inputvalue import *
from ipi.utils.units import *
__all__ = ['InputNormalModes']
class InputNormalModes(InputArray):
""" Storage class for NormalModes engine.
Describes how normal-modes transformation and integration should be
performed.
Attributes:
mode: Specifies the method by which the dynamical masses are created.
transform: Specifies whether the normal mode calculation will be
        done using an FFT or a matrix multiplication.
"""
attribs = copy(InputArray.attribs)
attribs["mode"] = (InputAttribute, {"dtype" : str,
"default" : "rpmd",
"help" : "Specifies the technique to be used to calculate the dynamical masses. 'rpmd' simply assigns the bead masses the physical mass. 'manual' sets all the normal mode frequencies except the centroid normal mode manually. 'pa-cmd' takes an argument giving the frequency to set all the non-centroid normal modes to. 'wmax-cmd' is similar to 'pa-cmd', except instead of taking one argument it takes two ([wmax,wtarget]). The lowest-lying normal mode will be set to wtarget for a free particle, and all the normal modes will coincide at frequency wmax. ",
"options" : ['pa-cmd', 'wmax-cmd', 'manual', 'rpmd']})
attribs["transform"] = (InputValue,{"dtype" : str,
"default" : "fft",
"help" : "Specifies whether to calculate the normal mode transform using a fast Fourier transform or a matrix multiplication. For small numbers of beads the matrix multiplication may be faster.",
"options" : ['fft', 'matrix']})
default_help = "Deals with the normal mode transformations, including the adjustment of bead masses to give the desired ring polymer normal mode frequencies if appropriate. Takes as arguments frequencies, of which different numbers must be specified and which are used to scale the normal mode frequencies in different ways depending on which 'mode' is specified."
default_label = "NORMALMODES"
def __init__(self, help=None, dimension=None, default=None, dtype=None):
""" Initializes InputNormalModes.
Just calls the parent initialization function with appropriate arguments.
"""
super(InputNormalModes,self).__init__(help=help, default=default, dtype=float, dimension="frequency")
def store(self, nm):
"""Takes a normal modes instance and stores a minimal representation
of it.
Args:
nm: A normal modes object.
"""
super(InputNormalModes,self).store(nm.nm_freqs)
self.mode.store(nm.mode)
self.transform.store(nm.transform_method)
def fetch(self):
"""Creates a normal modes object.
Returns:
A normal modes object.
"""
super(InputNormalModes,self).check()
return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes,self).fetch() )
| gpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/ctypes/test/test_errno.py | 115 | 2330 | import unittest, os, errno
from ctypes import *
from ctypes.util import find_library
from test import test_support
try:
import threading
except ImportError:
threading = None
class Test(unittest.TestCase):
def test_open(self):
libc_name = find_library("c")
if libc_name is None:
raise unittest.SkipTest("Unable to find C library")
libc = CDLL(libc_name, use_errno=True)
if os.name == "nt":
libc_open = libc._open
else:
libc_open = libc.open
libc_open.argtypes = c_char_p, c_int
self.assertEqual(libc_open("", 0), -1)
self.assertEqual(get_errno(), errno.ENOENT)
self.assertEqual(set_errno(32), errno.ENOENT)
self.assertEqual(get_errno(), 32)
if threading:
def _worker():
set_errno(0)
libc = CDLL(libc_name, use_errno=False)
if os.name == "nt":
libc_open = libc._open
else:
libc_open = libc.open
libc_open.argtypes = c_char_p, c_int
self.assertEqual(libc_open("", 0), -1)
self.assertEqual(get_errno(), 0)
t = threading.Thread(target=_worker)
t.start()
t.join()
self.assertEqual(get_errno(), 32)
set_errno(0)
@unittest.skipUnless(os.name == "nt", 'Test specific to Windows')
def test_GetLastError(self):
dll = WinDLL("kernel32", use_last_error=True)
GetModuleHandle = dll.GetModuleHandleA
GetModuleHandle.argtypes = [c_wchar_p]
self.assertEqual(0, GetModuleHandle("foo"))
self.assertEqual(get_last_error(), 126)
self.assertEqual(set_last_error(32), 126)
self.assertEqual(get_last_error(), 32)
def _worker():
set_last_error(0)
dll = WinDLL("kernel32", use_last_error=False)
GetModuleHandle = dll.GetModuleHandleW
GetModuleHandle.argtypes = [c_wchar_p]
GetModuleHandle("bar")
self.assertEqual(get_last_error(), 0)
t = threading.Thread(target=_worker)
t.start()
t.join()
self.assertEqual(get_last_error(), 32)
set_last_error(0)
if __name__ == "__main__":
unittest.main()
| mit |
jamslevy/gsoc | thirdparty/google_appengine/google/appengine/tools/appcfg.py | 1 | 74994 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
import calendar
import datetime
import getpass
import logging
import mimetypes
import optparse
import os
import re
import sha
import sys
import tempfile
import time
import urllib2
import google
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import croninfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appengine_rpc
from google.appengine.tools import bulkloader
MAX_FILES_TO_CLONE = 100
LIST_DELIMITER = "\n"
TUPLE_DELIMITER = "|"
VERSION_FILE = "../VERSION"
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = ".appcfg_nag"
MAX_LOG_LEVEL = 4
verbosity = 1
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = "python"
_api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
_options = validation.Options(*_api_versions.split(','))
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
del _api_versions, _options
def StatusUpdate(msg):
"""Print a status message to stderr.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print >>sys.stderr, msg
def GetMimeTypeIfStaticFile(config, filename):
"""Looks up the mime type for 'filename'.
Uses the handlers in 'config' to determine if the file should
be treated as a static file.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
The mime type string. For example, 'text/plain' or 'image/gif'.
None if this is not a static file.
"""
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ("static_dir", "static_files"):
if handler_type == "static_dir":
regex = os.path.join(re.escape(handler.GetHandler()), ".*")
else:
regex = handler.upload
if re.match(regex, filename):
if handler.mime_type is not None:
return handler.mime_type
else:
guess = mimetypes.guess_type(filename)[0]
if guess is None:
default = "application/octet-stream"
print >>sys.stderr, ("Could not guess mimetype for %s. Using %s."
% (filename, default))
return default
return guess
return None
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[0]
tup = tup[1:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
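# Illustrative sketch (not part of the original module): how two static
# blobs serialise; the hash strings below are placeholders, not real
# content hashes.
def _ExampleClonePostBody():
  body = BuildClonePostBody([("img/logo.gif", "<hash1>", "image/gif"),
                             ("css/main.css", "<hash2>", "text/css")])
  # Tuple entries are joined with TUPLE_DELIMITER ('|') and tuples with
  # LIST_DELIMITER ('\n').
  assert body == ("img/logo.gif|<hash1>|image/gif\n"
                  "css/main.css|<hash2>|text/css")
  return body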
class NagFile(validation.Validated):
"""A validated YAML class to represent the user's nag preferences.
Attributes:
timestamp: The timestamp of the last nag.
opt_in: True if the user wants to check for updates on dev_appserver
start. False if not. May be None if we have not asked the user yet.
"""
ATTRIBUTES = {
"timestamp": validation.TYPE_FLOAT,
"opt_in": validation.Optional(validation.TYPE_BOOL),
}
@staticmethod
def Load(nag_file):
"""Load a single NagFile object where one and only one is expected.
Args:
nag_file: A file-like object or string containing the yaml data to parse.
Returns:
A NagFile instance.
"""
return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
"""Gets the version of the SDK by parsing the VERSION file.
Args:
isfile: used for testing.
open_fn: Used for testing.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.__file__),
VERSION_FILE)
if not isfile(version_filename):
logging.error("Could not find version file at %s", version_filename)
return None
version_fh = open_fn(version_filename, "r")
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable):
"""Calls a function multiple times, backing off more and more each time.
Args:
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
max_tries: Maximum number of tries.
callable: The method to call, will pass no arguments.
Returns:
    True if the function succeeded in one of its tries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
while not callable() and max_tries > 0:
StatusUpdate("Will check again in %s seconds." % delay)
time.sleep(delay)
delay *= backoff_factor
max_tries -= 1
return max_tries > 0
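# Illustrative usage sketch (not part of the original module): poll a flaky
# check with exponentially growing delays (1s, 2s, 4s, ...) until it returns
# a true value or the tries are exhausted.
def _ExampleRetry(check_fn):
  if not RetryWithBackoff(1, 2, 8, check_fn):
    logging.warning("check_fn never succeeded; giving up.")
    return False
  return True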
class UpdateCheck(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
server: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
server,
config,
isdir=os.path.isdir,
isfile=os.path.isfile,
open_fn=open):
"""Create a new UpdateCheck.
Args:
server: The AbstractRpcServer to use.
config: The yaml object that specifies the configuration of this
application.
isdir: Replacement for os.path.isdir (for testing).
isfile: Replacement for os.path.isfile (for testing).
open_fn: Replacement for the open builtin (for testing).
"""
self.server = server
self.config = config
self.isdir = isdir
self.isfile = isfile
self.open = open_fn
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser("~/")
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ["HOMEDRIVE"] = drive
return os.path.expanduser("~/" + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject(isfile=self.isfile, open_fn=self.open)
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
SystemExit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error("Could not determine if the SDK supports the api_version "
"requested in app.yaml.")
return
if self.config.api_version not in version["api_versions"]:
logging.critical("The api_version specified in app.yaml (%s) is not "
"supported by this release of the SDK. The supported "
"api_versions are %s.",
self.config.api_version, version["api_versions"])
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
"release": The name of the release (e.g. 1.2).
"timestamp": The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
"api_versions": A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support the api_version named in
self.config.
"""
version = self._ParseVersionFile()
if version is None:
logging.info("Skipping update check")
return
logging.info("Checking for updates to the SDK.")
try:
response = self.server.Send("/api/updatecheck",
timeout=UPDATE_CHECK_TIMEOUT,
release=version["release"],
timestamp=version["timestamp"],
api_versions=version["api_versions"])
except urllib2.URLError, e:
logging.info("Update check failed: %s", e)
return
latest = yaml.safe_load(response)
if latest["release"] == version["release"]:
logging.info("The SDK is up to date.")
return
api_versions = latest["api_versions"]
if self.config.api_version not in api_versions:
self._Nag(
"The api version you are using (%s) is obsolete! You should\n"
"upgrade your SDK and test that your code works with the new\n"
"api version." % self.config.api_version,
latest, version, force=True)
return
if self.config.api_version != api_versions[len(api_versions) - 1]:
self._Nag(
"The api version you are using (%s) is deprecated. You should\n"
"upgrade your SDK to try the new functionality." %
self.config.api_version, latest, version)
return
self._Nag("There is a new release of the SDK available.",
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = UpdateCheck.MakeNagFilename()
if self.isfile(nag_filename):
fh = self.open(nag_filename, "r")
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
return None
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = UpdateCheck.MakeNagFilename()
try:
fh = self.open(nagfilename, "w")
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error("Could not write nag file to %s. Error: %s", nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
    Because we don't want to nag the user every time, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug("Skipping nag message")
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print "****************************************************************"
print msg
print "-----------"
print "Latest SDK:"
print yaml.dump(latest)
print "-----------"
print "Your SDK:"
print yaml.dump(version)
print "-----------"
print "Please visit http://code.google.com/appengine for the latest SDK"
print "****************************************************************"
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
if nag.opt_in is None:
answer = input_fn("Allow dev_appserver to check for updates on startup? "
"(Y/n): ")
answer = answer.strip().lower()
if answer == "n" or answer == "no":
print ("dev_appserver will not check for updates on startup. To "
"change this setting, edit %s" % UpdateCheck.MakeNagFilename())
nag.opt_in = False
else:
print ("dev_appserver will check for updates on startup. To change "
"this setting, edit %s" % UpdateCheck.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
class IndexDefinitionUpload(object):
"""Provides facilities to upload index definitions to the hosting service."""
def __init__(self, server, config, definitions):
"""Creates a new DatastoreIndexUpload.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
definitions: An IndexDefinitions object.
"""
self.server = server
self.config = config
self.definitions = definitions
def DoUpload(self):
"""Uploads the index definitions."""
StatusUpdate("Uploading index definitions.")
self.server.Send("/api/datastore/index/add",
app_id=self.config.application,
version=self.config.version,
payload=self.definitions.ToYAML())
class CronEntryUpload(object):
"""Provides facilities to upload cron entries to the hosting service."""
def __init__(self, server, config, cron):
"""Creates a new CronEntryUpload.
Args:
server: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
config: The AppInfoExternal object derived from the app.yaml file.
cron: The CronInfoExternal object loaded from the cron.yaml file.
"""
self.server = server
self.config = config
self.cron = cron
def DoUpload(self):
"""Uploads the cron entries."""
StatusUpdate("Uploading cron entries.")
self.server.Send("/api/datastore/cron/update",
app_id=self.config.application,
version=self.config.version,
payload=self.cron.ToYAML())
class IndexOperation(object):
"""Provide facilities for writing Index operation commands."""
def __init__(self, server, config):
"""Creates a new IndexOperation.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
"""
self.server = server
self.config = config
def DoDiff(self, definitions):
"""Retrieve diff file from the server.
Args:
definitions: datastore_index.IndexDefinitions as loaded from users
index.yaml file.
Returns:
A pair of datastore_index.IndexDefinitions objects. The first record
is the set of indexes that are present in the index.yaml file but missing
from the server. The second record is the set of indexes that are
present on the server but missing from the index.yaml file (indicating
that these indexes should probably be vacuumed).
"""
StatusUpdate("Fetching index definitions diff.")
response = self.server.Send("/api/datastore/index/diff",
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseMultipleIndexDefinitions(response)
def DoDelete(self, definitions):
"""Delete indexes from the server.
Args:
definitions: Index definitions to delete from datastore.
Returns:
      A single datastore_index.IndexDefinitions containing indexes that were
not deleted, probably because they were already removed. This may
be normal behavior as there is a potential race condition between fetching
the index-diff and sending deletion confirmation through.
"""
StatusUpdate("Deleting selected index definitions.")
response = self.server.Send("/api/datastore/index/delete",
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseIndexDefinitions(response)
class VacuumIndexesOperation(IndexOperation):
"""Provide facilities to request the deletion of datastore indexes."""
def __init__(self, server, config, force,
confirmation_fn=raw_input):
"""Creates a new VacuumIndexesOperation.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
force: True to force deletion of indexes, else False.
      confirmation_fn: Function used for getting input from the user.
"""
super(VacuumIndexesOperation, self).__init__(server, config)
self.force = force
self.confirmation_fn = confirmation_fn
def GetConfirmation(self, index):
"""Get confirmation from user to delete an index.
This method will enter an input loop until the user provides a
response it is expecting. Valid input is one of three responses:
y: Confirm deletion of index.
n: Do not delete index.
a: Delete all indexes without asking for further confirmation.
If the user enters nothing at all, the default action is to skip
that index and do not delete.
If the user selects 'a', as a side effect, the 'force' flag is set.
Args:
index: Index to confirm.
Returns:
True if user enters 'y' or 'a'. False if user enter 'n'.
"""
while True:
print "This index is no longer defined in your index.yaml file."
print
print index.ToYAML()
print
confirmation = self.confirmation_fn(
"Are you sure you want to delete this index? (N/y/a): ")
confirmation = confirmation.strip().lower()
if confirmation == "y":
return True
elif confirmation == "n" or not confirmation:
return False
elif confirmation == "a":
self.force = True
return True
else:
print "Did not understand your response."
def DoVacuum(self, definitions):
"""Vacuum indexes in datastore.
This method will query the server to determine which indexes are not
being used according to the user's local index.yaml file. Once it has
made this determination, it confirms with the user which unused indexes
    should be deleted. Once confirmation for each index is received, it
deletes those indexes.
Because another user may in theory delete the same indexes at the same
    time as the user, there is a potential race condition. In these rare cases,
some of the indexes previously confirmed for deletion will not be found.
The user is notified which indexes these were.
Args:
definitions: datastore_index.IndexDefinitions as loaded from users
index.yaml file.
"""
unused_new_indexes, notused_indexes = self.DoDiff(definitions)
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes is not None:
for index in notused_indexes.indexes:
if self.force or self.GetConfirmation(index):
deletions.indexes.append(index)
if deletions.indexes:
not_deleted = self.DoDelete(deletions)
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ("An index was not deleted. Most likely this is "
"because it no longer exists.\n\n")
else:
warning_message = ("%d indexes were not deleted. Most likely this "
"is because they no longer exist.\n\n"
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
logging.warning(warning_message)
class LogsRequester(object):
"""Provide facilities to export request logs."""
def __init__(self, server, config, output_file,
num_days, append, severity, now):
"""Constructor.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
output_file: Output file name.
num_days: Number of days worth of logs to export; 0 for all available.
append: True if appending to an existing file.
severity: App log severity to request (0-4); None for no app logs.
now: POSIX timestamp used for calculating valid dates for num_days.
"""
self.server = server
self.config = config
self.output_file = output_file
self.append = append
self.num_days = num_days
self.severity = severity
self.version_id = self.config.version + ".1"
self.sentinel = None
self.write_mode = "w"
if self.append:
self.sentinel = FindSentinel(self.output_file)
self.write_mode = "a"
self.valid_dates = None
if self.num_days:
patterns = []
now = PacificTime(now)
for i in xrange(self.num_days):
then = time.gmtime(now - 24*3600 * i)
patterns.append(re.escape(time.strftime("%d/%m/%Y", then)))
patterns.append(re.escape(time.strftime("%d/%b/%Y", then)))
self.valid_dates = re.compile(r"[^[]+\[(" + "|".join(patterns) + r"):")
def DownloadLogs(self):
"""Download the requested logs.
This will write the logs to the file designated by
self.output_file, or to stdout if the filename is '-'.
Multiple roundtrips to the server may be made.
"""
StatusUpdate("Downloading request logs for %s %s." %
(self.config.application, self.version_id))
tf = tempfile.TemporaryFile()
offset = None
try:
while True:
try:
offset = self.RequestLogLines(tf, offset)
if not offset:
break
except KeyboardInterrupt:
StatusUpdate("Keyboard interrupt; saving data downloaded so far.")
break
StatusUpdate("Copying request logs to %r." % self.output_file)
if self.output_file == "-":
of = sys.stdout
else:
try:
of = open(self.output_file, self.write_mode)
except IOError, err:
StatusUpdate("Can't write %r: %s." % (self.output_file, err))
sys.exit(1)
try:
line_count = CopyReversedLines(tf, of)
finally:
of.flush()
if of is not sys.stdout:
of.close()
finally:
tf.close()
StatusUpdate("Copied %d records." % line_count)
def RequestLogLines(self, tf, offset):
"""Make a single roundtrip to the server.
Args:
tf: Writable binary stream to which the log lines returned by
the server are written, stripped of headers, and excluding
lines skipped due to self.sentinel or self.valid_dates filtering.
offset: Offset string for a continued request; None for the first.
Returns:
The offset string to be used for the next request, if another
request should be issued; or None, if not.
"""
logging.info("Request with offset %r.", offset)
kwds = {"app_id": self.config.application,
"version": self.version_id,
"limit": 100,
}
if offset:
kwds["offset"] = offset
if self.severity is not None:
kwds["severity"] = str(self.severity)
response = self.server.Send("/api/request_logs", payload=None, **kwds)
response = response.replace("\r", "\0")
lines = response.splitlines()
logging.info("Received %d bytes, %d records.", len(response), len(lines))
offset = None
if lines and lines[0].startswith("#"):
match = re.match(r"^#\s*next_offset=(\S+)\s*$", lines[0])
del lines[0]
if match:
offset = match.group(1)
if lines and lines[-1].startswith("#"):
del lines[-1]
valid_dates = self.valid_dates
sentinel = self.sentinel
len_sentinel = None
if sentinel:
len_sentinel = len(sentinel)
for line in lines:
if ((sentinel and
line.startswith(sentinel) and
line[len_sentinel : len_sentinel+1] in ("", "\0")) or
(valid_dates and not valid_dates.match(line))):
return None
tf.write(line + "\n")
if not lines:
return None
return offset
def PacificTime(now):
"""Helper to return the number of seconds between UTC and Pacific time.
This is needed to compute today's date in Pacific time (more
specifically: Mountain View local time), which is how request logs
are reported. (Google servers always report times in Mountain View
local time, regardless of where they are physically located.)
This takes (post-2006) US DST into account. Pacific time is either
8 hours or 7 hours west of UTC, depending on whether DST is in
  effect. Since 2007, US DST starts on the second Sunday in March and ends
  on the first Sunday in November. (Reference:
http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
Note that the server doesn't report its local time (the HTTP Date
header uses UTC), and the client's local time is irrelevant.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A pseudo-posix timestamp giving current Pacific time. Passing
this through time.gmtime() will produce a tuple in Pacific local
time.
"""
now -= 8*3600
if IsPacificDST(now):
now += 3600
return now
def IsPacificDST(now):
"""Helper for PacificTime to decide whether now is Pacific DST (PDT).
Args:
now: A pseudo-posix timestamp giving current time in PST.
Returns:
True if now falls within the range of DST, False otherwise.
"""
DAY = 24*3600
SUNDAY = 6
pst = time.gmtime(now)
year = pst[0]
assert year >= 2007
begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
while time.gmtime(begin).tm_wday != SUNDAY:
begin += DAY
end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
while time.gmtime(end).tm_wday != SUNDAY:
end += DAY
return begin <= now < end
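# Worked sketch (not part of the original module): format a request-log date
# for a UTC timestamp the way LogsRequester does, by shifting into
# pseudo-Pacific time (-8h, or -7h during DST) before formatting.
def _ExamplePacificDate(now):
  return time.strftime("%d/%b/%Y", time.gmtime(PacificTime(now)))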
def CopyReversedLines(instream, outstream, blocksize=2**16):
r"""Copy lines from input stream to output stream in reverse order.
As a special feature, null bytes in the input are turned into
newlines followed by tabs in the output, but these "sub-lines"
separated by null bytes are not reversed. E.g. If the input is
"A\0B\nC\0D\n", the output is "C\n\tD\nA\n\tB\n".
Args:
instream: A seekable stream open for reading in binary mode.
outstream: A stream open for writing; doesn't have to be seekable or binary.
blocksize: Optional block size for buffering, for unit testing.
Returns:
The number of lines copied.
"""
line_count = 0
instream.seek(0, 2)
last_block = instream.tell() // blocksize
spillover = ""
for iblock in xrange(last_block + 1, -1, -1):
instream.seek(iblock * blocksize)
data = instream.read(blocksize)
lines = data.splitlines(True)
lines[-1:] = "".join(lines[-1:] + [spillover]).splitlines(True)
if lines and not lines[-1].endswith("\n"):
lines[-1] += "\n"
lines.reverse()
if lines and iblock > 0:
spillover = lines.pop()
if lines:
line_count += len(lines)
data = "".join(lines).replace("\0", "\n\t")
outstream.write(data)
return line_count
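# Illustrative sketch (not part of the original module), mirroring the
# docstring above: whole lines come out in reverse order, while
# \0-separated sub-lines keep their order and become "\n\t" continuations.
def _ExampleCopyReversed():
  import StringIO
  instream = StringIO.StringIO("A\0B\nC\0D\n")
  outstream = StringIO.StringIO()
  CopyReversedLines(instream, outstream)
  assert outstream.getvalue() == "C\n\tD\nA\n\tB\n"
  return outstream.getvalue()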
def FindSentinel(filename, blocksize=2**16):
"""Return the sentinel line from the output file.
Args:
filename: The filename of the output file. (We'll read this file.)
blocksize: Optional block size for buffering, for unit testing.
Returns:
The contents of the last line in the file that doesn't start with
a tab, with its trailing newline stripped; or None if the file
couldn't be opened or no such line could be found by inspecting
the last 'blocksize' bytes of the file.
"""
if filename == "-":
StatusUpdate("Can't combine --append with output to stdout.")
sys.exit(2)
try:
fp = open(filename, "rb")
except IOError, err:
StatusUpdate("Append mode disabled: can't read %r: %s." % (filename, err))
return None
try:
fp.seek(0, 2)
fp.seek(max(0, fp.tell() - blocksize))
lines = fp.readlines()
del lines[:1]
sentinel = None
for line in lines:
if not line.startswith("\t"):
sentinel = line
if not sentinel:
StatusUpdate("Append mode disabled: can't find sentinel in %r." %
filename)
return None
return sentinel.rstrip("\n")
finally:
fp.close()
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
server: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
files: A dictionary of files to upload to the server, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
"""
def __init__(self, server, config):
"""Creates a new AppVersionUpload.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer or
TestRpcServer.
config: An AppInfoExternal object that specifies the configuration for
this application.
"""
self.server = server
self.config = config
self.app_id = self.config.application
self.version = self.config.version
self.files = {}
self.in_transaction = False
self.deployed = False
def _Hash(self, content):
"""Compute the hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
h = sha.new(content).hexdigest()
return "%s_%s_%s_%s_%s" % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, "Already in a transaction."
assert file_handle is not None
reason = appinfo.ValidFilename(path)
if reason:
logging.error(reason)
return
pos = file_handle.tell()
content_hash = self._Hash(file_handle.read())
file_handle.seek(pos, 0)
self.files[path] = content_hash
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, "Already in a transaction."
StatusUpdate("Initiating update.")
self.server.Send("/api/appversion/create", app_id=self.app_id,
version=self.version, payload=self.config.ToYAML())
self.in_transaction = True
files_to_clone = []
blobs_to_clone = []
for path, content_hash in self.files.iteritems():
mime_type = GetMimeTypeIfStaticFile(self.config, path)
if mime_type is not None:
blobs_to_clone.append((path, content_hash, mime_type))
else:
files_to_clone.append((path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: the server URL to use.
files: a list of files
file_type: the type of the files
"""
if not files:
return
StatusUpdate("Cloning %d %s file%s." %
(len(files), file_type, len(files) != 1 and "s" or ""))
for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
if i > 0 and i % MAX_FILES_TO_CLONE == 0:
StatusUpdate("Cloned %d files." % i)
chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
result = self.server.Send(url,
app_id=self.app_id, version=self.version,
payload=BuildClonePostBody(chunk))
if result:
files_to_upload.update(dict(
(f, self.files[f]) for f in result.split(LIST_DELIMITER)))
CloneFiles("/api/appversion/cloneblobs", blobs_to_clone, "static")
CloneFiles("/api/appversion/clonefiles", files_to_clone, "application")
logging.info("Files to upload: " + str(files_to_upload))
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, "Begin() must be called before UploadFile()."
if path not in self.files:
raise KeyError("File '%s' is not in the list of files to be uploaded."
% path)
del self.files[path]
mime_type = GetMimeTypeIfStaticFile(self.config, path)
if mime_type is not None:
self.server.Send("/api/appversion/addblob", app_id=self.app_id,
version=self.version, path=path, content_type=mime_type,
payload=file_handle.read())
else:
self.server.Send("/api/appversion/addfile", app_id=self.app_id,
version=self.version, path=path,
payload=file_handle.read())
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
This tries the new 'deploy' method; if that fails it uses the old 'commit'.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, "Begin() must be called before Commit()."
if self.files:
raise Exception("Not all required files have been uploaded.")
try:
self.Deploy()
if not RetryWithBackoff(1, 2, 8, self.IsReady):
logging.warning("Version still not ready to serve, aborting.")
raise Exception("Version not ready.")
self.StartServing()
except urllib2.HTTPError, e:
if e.code != 404:
raise
StatusUpdate("Closing update.")
self.server.Send("/api/appversion/commit", app_id=self.app_id,
version=self.version)
self.in_transaction = False
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, "Begin() must be called before Deploy()."
if self.files:
raise Exception("Not all required files have been uploaded.")
StatusUpdate("Deploying new version.")
self.server.Send("/api/appversion/deploy", app_id=self.app_id,
version=self.version)
self.deployed = True
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
Exception: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, "Deploy() must be called before IsReady()."
StatusUpdate("Checking if new version is ready to serve.")
result = self.server.Send("/api/appversion/isready", app_id=self.app_id,
version=self.version)
return result == "1"
def StartServing(self):
"""Start serving with the newly created version.
Raises:
Exception: Deploy has not yet been called.
"""
    assert self.deployed, "Deploy() must be called before StartServing()."
StatusUpdate("Closing update: new version is ready to start serving.")
self.server.Send("/api/appversion/startserving",
app_id=self.app_id, version=self.version)
self.in_transaction = False
def Rollback(self):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
StatusUpdate("Rolling back the update.")
self.server.Send("/api/appversion/rollback", app_id=self.app_id,
version=self.version)
self.in_transaction = False
self.files = {}
def DoUpload(self, paths, max_size, openfunc):
"""Uploads a new appversion with the given config and files to the server.
Args:
paths: An iterator that yields the relative paths of the files to upload.
max_size: The maximum size file to upload.
openfunc: A function that takes a path and returns a file-like object.
"""
logging.info("Reading app configuration.")
path = ""
try:
StatusUpdate("Scanning files on local disk.")
num_files = 0
for path in paths:
file_handle = openfunc(path)
try:
if self.config.skip_files.match(path):
logging.info("Ignoring file '%s': File matches ignore regex.",
path)
else:
file_length = GetFileLength(file_handle)
if file_length > max_size:
logging.error("Ignoring file '%s': Too long "
"(max %d bytes, file is %d bytes)",
path, max_size, file_length)
else:
logging.info("Processing file '%s'", path)
self.AddFile(path, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate("Scanned %d files." % num_files)
except KeyboardInterrupt:
logging.info("User interrupted. Aborting.")
raise
except EnvironmentError, e:
logging.error("An error occurred processing file '%s': %s. Aborting.",
path, e)
raise
try:
missing_files = self.Begin()
if missing_files:
StatusUpdate("Uploading %d files." % len(missing_files))
num_files = 0
for missing_file in missing_files:
logging.info("Uploading file '%s'" % missing_file)
file_handle = openfunc(missing_file)
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate("Uploaded %d files." % num_files)
self.Commit()
except KeyboardInterrupt:
logging.info("User interrupted. Aborting.")
self.Rollback()
raise
except:
logging.exception("An unexpected error occurred. Aborting.")
self.Rollback()
raise
logging.info("Done!")
def FileIterator(base, separator=os.path.sep):
"""Walks a directory tree, returning all the files. Follows symlinks.
Args:
base: The base path to search for files under.
separator: Path separator used by the running system's platform.
Yields:
Paths of files found, relative to base.
"""
dirs = [""]
while dirs:
current_dir = dirs.pop()
for entry in os.listdir(os.path.join(base, current_dir)):
name = os.path.join(current_dir, entry)
fullname = os.path.join(base, name)
if os.path.isfile(fullname):
if separator == "\\":
name = name.replace("\\", "/")
yield name
elif os.path.isdir(fullname):
dirs.append(name)
def GetFileLength(fh):
"""Returns the length of the file represented by fh.
This function is capable of finding the length of any seekable stream,
unlike os.fstat, which only works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
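# Illustrative usage sketch (not part of the original module): walk an app
# directory the way DoUpload() scans files, pairing each relative path
# yielded by FileIterator with its length from GetFileLength().
def _ExampleScan(basepath):
  for name in FileIterator(basepath):
    fh = open(os.path.join(basepath, name), "rb")
    try:
      yield name, GetFileLength(fh)
    finally:
      fh.close()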
def GetUserAgent(get_version=GetVersionObject,
get_platform=appengine_rpc.GetPlatformToken):
"""Determines the value of the 'User-agent' header to use for HTTP requests.
If the 'APPCFG_SDK_NAME' environment variable is present, that will be
used as the first product token in the user-agent.
Args:
get_version: Used for testing.
get_platform: Used for testing.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., "appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2".
"""
product_tokens = []
sdk_name = os.environ.get("APPCFG_SDK_NAME")
if sdk_name:
product_tokens.append(sdk_name)
else:
version = get_version()
if version is None:
release = "unknown"
else:
release = version["release"]
product_tokens.append("appcfg_py/%s" % release)
product_tokens.append(get_platform())
python_version = ".".join(str(i) for i in sys.version_info)
product_tokens.append("Python/%s" % python_version)
return " ".join(product_tokens)
def GetSourceName(get_version=GetVersionObject):
"""Gets the name of this source version."""
version = get_version()
if version is None:
release = "unknown"
else:
release = version["release"]
return "Google-appcfg-%s" % (release,)
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
raw_input_fn: Function used for getting raw user input, like email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
OptionsParser will exit the program when there is a parse failure, it
is nice to subclass OptionsParser and catch the error before exiting.
"""
def __init__(self, argv, parser_class=optparse.OptionParser,
rpc_server_class=appengine_rpc.HttpRpcServer,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass,
error_fh=sys.stderr,
update_check_class=UpdateCheck):
"""Initializer. Parses the cmdline and selects the Action to use.
Initializes all of the attributes described in the class docstring.
Prints help or error messages if there is an error parsing the cmdline.
Args:
argv: The list of arguments passed to this program.
parser_class: Options parser to use for this application.
rpc_server_class: RPC server class to use for this application.
raw_input_fn: Function used for getting user email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
update_check_class: UpdateCheck class (can be replaced for testing).
"""
self.parser_class = parser_class
self.argv = argv
self.rpc_server_class = rpc_server_class
self.raw_input_fn = raw_input_fn
self.password_input_fn = password_input_fn
self.error_fh = error_fh
self.update_check_class = update_check_class
self.parser = self._GetOptionParser()
for action in self.actions.itervalues():
action.options(self, self.parser)
self.options, self.args = self.parser.parse_args(argv[1:])
if len(self.args) < 1:
self._PrintHelpAndExit()
if self.args[0] not in self.actions:
self.parser.error("Unknown action '%s'\n%s" %
(self.args[0], self.parser.get_description()))
action_name = self.args.pop(0)
self.action = self.actions[action_name]
self.parser, self.options = self._MakeSpecificParser(self.action)
if self.options.help:
self._PrintHelpAndExit()
if self.options.verbose == 2:
logging.getLogger().setLevel(logging.INFO)
elif self.options.verbose == 3:
logging.getLogger().setLevel(logging.DEBUG)
global verbosity
verbosity = self.options.verbose
def Run(self):
"""Executes the requested action.
Catches any HTTPErrors raised by the action and prints them to stderr.
"""
try:
self.action(self)
except urllib2.HTTPError, e:
body = e.read()
print >>self.error_fh, ("Error %d: --- begin server output ---\n"
"%s\n--- end server output ---" %
(e.code, body.rstrip("\n")))
return 1
except yaml_errors.EventListenerError, e:
print >>self.error_fh, ("Error parsing yaml file:\n%s" % e)
return 1
return 0
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ""
for action_name in action_names:
desc += " %s: %s\n" % (action_name, self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + "\n"
desc = self._GetActionDescriptions()
desc = ("Action must be one of:\n%s"
"Use 'help <action>' for a detailed description.") % desc
parser = self.parser_class(usage="%prog [options] <action>",
description=desc,
formatter=Formatter(),
conflict_handler="resolve")
parser.add_option("-h", "--help", action="store_true",
dest="help", help="Show the help message and exit.")
parser.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
parser.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
parser.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
parser.add_option("-s", "--server", action="store", dest="server",
default="appengine.google.com",
metavar="SERVER", help="The server to connect to.")
parser.add_option("--secure", action="store_true", dest="secure",
default=False,
help="Use SSL when communicating with the server.")
parser.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
parser.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
parser.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
parser.add_option("--passin", action="store_true",
dest="passin", default=False,
help="Read the login password from stdin.")
return parser
def _MakeSpecificParser(self, action):
"""Creates a new parser with documentation specific to 'action'.
Args:
action: An Action instance to be used when initializing the new parser.
Returns:
A tuple containing:
parser: An instance of OptionsParser customized to 'action'.
options: The command line options after re-parsing.
"""
parser = self._GetOptionParser()
parser.set_usage(action.usage)
parser.set_description("%s\n%s" % (action.short_desc, action.long_desc))
action.options(self, parser)
options, unused_args = parser.parse_args(self.argv[1:])
return parser, options
def _PrintHelpAndExit(self, exit_code=2):
"""Prints the parser's help message and exits the program.
Args:
exit_code: The integer code to pass to sys.exit().
"""
self.parser.print_help()
sys.exit(exit_code)
def _GetRpcServer(self):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = self.options.email
if email is None:
email = self.raw_input_fn("Email: ")
password_prompt = "Password for %s: " % email
if self.options.passin:
password = self.raw_input_fn(password_prompt)
else:
password = self.password_input_fn(password_prompt)
return (email, password)
if self.options.host and self.options.host == "localhost":
email = self.options.email
if email is None:
email = "[email protected]"
logging.info("Using debug user %s. Override with --email" % email)
server = self.rpc_server_class(
self.options.server,
lambda: (email, "password"),
GetUserAgent(),
GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies)
server.authenticated = True
return server
if self.options.passin:
auth_tries = 1
else:
auth_tries = 3
return self.rpc_server_class(self.options.server, GetUserCredentials,
GetUserAgent(), GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies,
auth_tries=auth_tries,
account_type="HOSTED_OR_GOOGLE",
secure=self.options.secure)
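  # Sketch of the credential flow implemented above (summary, not new
  # behaviour):
  #   --host=localhost -> canned ("test@example.com", "password") credentials,
  #                       no prompt, server marked as already authenticated
  #   --passin         -> password read from stdin, a single auth attempt
  #   otherwise        -> interactive email/password prompt, up to 3 attempts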
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Filename without extension to search for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error("Not a directory: %s" % basepath)
for yaml_file in (file_name + ".yaml", file_name + ".yml"):
yaml_path = os.path.join(basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
def _ParseAppYaml(self, basepath):
"""Parses the app.yaml file.
Args:
basepath: the directory of the application.
Returns:
An AppInfoExternal object.
"""
appyaml_filename = self._FindYaml(basepath, "app")
if appyaml_filename is None:
self.parser.error("Directory does not contain an app.yaml "
"configuration file.")
fh = open(appyaml_filename, "r")
try:
appyaml = appinfo.LoadSingleAppInfo(fh)
finally:
fh.close()
return appyaml
def _ParseIndexYaml(self, basepath):
"""Parses the index.yaml file.
Args:
basepath: the directory of the application.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
file_name = self._FindYaml(basepath, "index")
if file_name is not None:
fh = open(file_name, "r")
try:
index_defs = datastore_index.ParseIndexDefinitions(fh)
finally:
fh.close()
return index_defs
return None
def _ParseCronYaml(self, basepath):
"""Parses the cron.yaml file.
Args:
basepath: the directory of the application.
Returns:
A CronInfoExternal object.
"""
file_name = self._FindYaml(basepath, "cron")
if file_name is not None:
fh = open(file_name, "r")
try:
cron_info = croninfo.LoadSingleCron(fh)
finally:
fh.close()
return cron_info
return None
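  # A minimal cron.yaml that _ParseCronYaml() would load (illustrative
  # example, not shipped with this file):
  #
  #   cron:
  #   - description: daily summary job
  #     url: /tasks/summary
  #     schedule: every 24 hours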
def Help(self):
"""Prints help for a specific action.
Expects self.args[0] to contain the name of the action in question.
Exits the program after printing the help message.
"""
if len(self.args) != 1 or self.args[0] not in self.actions:
self.parser.error("Expected a single action argument. Must be one of:\n" +
self._GetActionDescriptions())
action = self.actions[self.args[0]]
self.parser, unused_options = self._MakeSpecificParser(action)
self._PrintHelpAndExit(exit_code=0)
def Update(self):
"""Updates and deploys a new appversion."""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
updatecheck = self.update_check_class(rpc_server, appyaml)
updatecheck.CheckForUpdates()
appversion = AppVersionUpload(rpc_server, appyaml)
appversion.DoUpload(FileIterator(basepath), self.options.max_size,
lambda path: open(os.path.join(basepath, path), "rb"))
index_defs = self._ParseIndexYaml(basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
try:
index_upload.DoUpload()
except urllib2.HTTPError, e:
StatusUpdate("Error %d: --- begin server output ---\n"
"%s\n--- end server output ---" %
(e.code, e.read().rstrip("\n")))
print >> self.error_fh, (
"Your app was updated, but there was an error updating your "
"indexes. Please retry later with appcfg.py update_indexes.")
cron_entries = self._ParseCronYaml(basepath)
if cron_entries:
cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
cron_upload.DoUpload()
def _UpdateOptions(self, parser):
"""Adds update-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option("-S", "--max_size", type="int", dest="max_size",
default=10485760, metavar="SIZE",
help="Maximum size of a file to upload.")
def VacuumIndexes(self):
"""Deletes unused indexes."""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
basepath = self.args[0]
config = self._ParseAppYaml(basepath)
index_defs = self._ParseIndexYaml(basepath)
if index_defs is None:
index_defs = datastore_index.IndexDefinitions()
rpc_server = self._GetRpcServer()
vacuum = VacuumIndexesOperation(rpc_server,
config,
self.options.force_delete)
vacuum.DoVacuum(index_defs)
def _VacuumIndexesOptions(self, parser):
"""Adds vacuum_indexes-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option("-f", "--force", action="store_true", dest="force_delete",
default=False,
help="Force deletion without being prompted.")
def UpdateCron(self):
"""Updates any new or changed cron definitions."""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
cron_entries = self._ParseCronYaml(basepath)
if cron_entries:
cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
cron_upload.DoUpload()
def UpdateIndexes(self):
"""Updates indexes."""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
index_defs = self._ParseIndexYaml(basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
index_upload.DoUpload()
def Rollback(self):
"""Does a rollback of any existing transaction for this app version."""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
appversion = AppVersionUpload(self._GetRpcServer(), appyaml)
appversion.in_transaction = True
appversion.Rollback()
def RequestLogs(self):
"""Write request logs to a file."""
if len(self.args) != 2:
self.parser.error(
"Expected a <directory> argument and an <output_file> argument.")
if (self.options.severity is not None and
not 0 <= self.options.severity <= MAX_LOG_LEVEL):
self.parser.error(
"Severity range is 0 (DEBUG) through %s (CRITICAL)." % MAX_LOG_LEVEL)
if self.options.num_days is None:
self.options.num_days = int(not self.options.append)
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
logs_requester = LogsRequester(rpc_server, appyaml, self.args[1],
self.options.num_days,
self.options.append,
self.options.severity,
time.time())
logs_requester.DownloadLogs()
def _RequestLogsOptions(self, parser):
"""Adds request_logs-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option("-n", "--num_days", type="int", dest="num_days",
action="store", default=None,
help="Number of days worth of log data to get. "
"The cut-off point is midnight UTC. "
"Use 0 to get all available logs. "
"Default is 1, unless --append is also given; "
"then the default is 0.")
parser.add_option("-a", "--append", dest="append",
action="store_true", default=False,
help="Append to existing file.")
parser.add_option("--severity", type="int", dest="severity",
action="store", default=None,
help="Severity of app-level log messages to get. "
"The range is 0 (DEBUG) through 4 (CRITICAL). "
"If omitted, only request logs are returned.")
def CronInfo(self, now=None, output=sys.stdout):
"""Displays information about cron definitions.
Args:
now: used for testing.
output: Used for testing.
"""
if len(self.args) != 1:
self.parser.error("Expected a single <directory> argument.")
if now is None:
now = datetime.datetime.now()
basepath = self.args[0]
cron_entries = self._ParseCronYaml(basepath)
if cron_entries and cron_entries.cron:
for entry in cron_entries.cron:
description = entry.description
if not description:
description = "<no description>"
print >>output, "\n%s:\nURL: %s\nSchedule: %s" % (description,
entry.schedule,
entry.url)
schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
matches = schedule.GetMatches(now, self.options.num_runs)
for match in matches:
print >>output, "%s, %s from now" % (
match.strftime("%Y-%m-%d %H:%M:%S"), match - now)
def _CronInfoOptions(self, parser):
"""Adds cron_info-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option("-n", "--num_runs", type="int", dest="num_runs",
action="store", default=5,
                      help="Number of runs of each cron job to display. "
                           "Default is 5.")
def _CheckRequiredUploadOptions(self):
"""Checks that upload options are present."""
for option in ["filename", "kind", "config_file"]:
if getattr(self.options, option) is None:
self.parser.error("Option '%s' is required." % option)
if not self.options.url:
self.parser.error("You must have google.appengine.ext.remote_api.handler "
"assigned to an endpoint in app.yaml, or provide "
"the url of the handler via the 'url' option.")
def InferUploadUrl(self, appyaml):
"""Uses app.yaml to determine the remote_api endpoint.
Args:
appyaml: A parsed app.yaml file.
Returns:
      The url of the remote_api endpoint as a string, or None if it
      cannot be inferred.
"""
handlers = appyaml.handlers
handler_suffix = "remote_api/handler.py"
app_id = appyaml.application
for handler in handlers:
if hasattr(handler, "script") and handler.script:
if handler.script.endswith(handler_suffix):
server = self.options.server
if server == "appengine.google.com":
return "http://%s.appspot.com%s" % (app_id, handler.url)
else:
return "http://%s%s" % (server, handler.url)
return None
def RunBulkloader(self, **kwargs):
"""Invokes the bulkloader with the given keyword arguments.
Args:
kwargs: Keyword arguments to pass to bulkloader.Run().
"""
try:
import sqlite3
except ImportError:
logging.error("upload_data action requires SQLite3 and the python "
"sqlite3 module (included in python since 2.5).")
sys.exit(1)
sys.exit(bulkloader.Run(kwargs))
def PerformUpload(self, run_fn=None):
"""Performs a datastore upload via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
if len(self.args) != 1:
self.parser.error("Expected <directory> argument.")
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
self.options.app_id = appyaml.application
if not self.options.url:
url = self.InferUploadUrl(appyaml)
if url is not None:
self.options.url = url
self._CheckRequiredUploadOptions()
if self.options.batch_size < 1:
self.parser.error("batch_size must be 1 or larger.")
if verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
self.options.debug = False
else:
logging.getLogger().setLevel(logging.DEBUG)
self.options.debug = True
StatusUpdate("Uploading data records.")
run_fn(app_id=self.options.app_id,
url=self.options.url,
filename=self.options.filename,
batch_size=self.options.batch_size,
kind=self.options.kind,
num_threads=self.options.num_threads,
bandwidth_limit=self.options.bandwidth_limit,
rps_limit=self.options.rps_limit,
http_limit=self.options.http_limit,
db_filename=self.options.db_filename,
config_file=self.options.config_file,
auth_domain=self.options.auth_domain,
has_header=self.options.has_header,
loader_opts=self.options.loader_opts,
log_file=self.options.log_file,
passin=self.options.passin,
email=self.options.email,
debug=self.options.debug,
exporter_opts=None,
download=False,
result_db_filename=None,
)
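  # Example invocation of the upload_data action wired up below (file,
  # kind and URL are hypothetical):
  #
  #   appcfg.py upload_data --config_file=album_loader.py \
  #       --filename=albums.csv --kind=Album \
  #       --url=http://myapp.appspot.com/remote_api myapp/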
def _PerformUploadOptions(self, parser):
"""Adds 'upload_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option("--filename", type="string", dest="filename",
action="store",
help="The name of the file containing the input data."
" (Required)")
parser.add_option("--config_file", type="string", dest="config_file",
action="store",
help="Name of the configuration file. (Required)")
parser.add_option("--kind", type="string", dest="kind",
action="store",
help="The kind of the entities to store. (Required)")
parser.add_option("--url", type="string", dest="url",
action="store",
help="The location of the remote_api endpoint.")
parser.add_option("--num_threads", type="int", dest="num_threads",
action="store", default=10,
help="Number of threads to upload records with.")
parser.add_option("--batch_size", type="int", dest="batch_size",
action="store", default=10,
help="Number of records to post in each request.")
parser.add_option("--bandwidth_limit", type="int", dest="bandwidth_limit",
action="store", default=250000,
help="The maximum bytes/second bandwidth for transfers.")
parser.add_option("--rps_limit", type="int", dest="rps_limit",
action="store", default=20,
help="The maximum records/second for transfers.")
parser.add_option("--http_limit", type="int", dest="http_limit",
action="store", default=8,
help="The maximum requests/second for transfers.")
parser.add_option("--db_filename", type="string", dest="db_filename",
action="store",
help="Name of the progress database file.")
parser.add_option("--auth_domain", type="string", dest="auth_domain",
action="store", default="gmail.com",
help="The name of the authorization domain to use.")
parser.add_option("--has_header", dest="has_header",
action="store_true", default=False,
help="Whether the first line of the input file should be"
" skipped")
parser.add_option("--loader_opts", type="string", dest="loader_opts",
help="A string to pass to the Loader.Initialize method.")
parser.add_option("--log_file", type="string", dest="log_file",
help="File to write bulkloader logs. If not supplied "
"then a new log file will be created, named: "
"bulkloader-log-TIMESTAMP.")
class Action(object):
"""Contains information about a command line action.
Attributes:
function: The name of a function defined on AppCfg or its subclasses
that will perform the appropriate action.
usage: A command line usage string.
short_desc: A one-line description of the action.
long_desc: A detailed description of the action. Whitespace and
formatting will be preserved.
options: A function that will add extra options to a given OptionParser
object.
"""
def __init__(self, function, usage, short_desc, long_desc="",
options=lambda obj, parser: None):
"""Initializer for the class attributes."""
self.function = function
self.usage = usage
self.short_desc = short_desc
self.long_desc = long_desc
self.options = options
def __call__(self, appcfg):
"""Invoke this Action on the specified AppCfg.
This calls the function of the appropriate name on AppCfg, and
    respects polymorphic overrides."""
method = getattr(appcfg, self.function)
return method()
actions = {
"help": Action(
function="Help",
usage="%prog help <action>",
short_desc="Print help for a specific action."),
"update": Action(
function="Update",
usage="%prog [options] update <directory>",
options=_UpdateOptions,
short_desc="Create or update an app version.",
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
"update_cron": Action(
function="UpdateCron",
usage="%prog [options] update_cron <directory>",
short_desc="Update application cron definitions.",
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the cron.yaml file."""),
"update_indexes": Action(
function="UpdateIndexes",
usage="%prog [options] update_indexes <directory>",
short_desc="Update application indexes.",
long_desc="""
The 'update_indexes' command will add any indexes that are not currently
in production, as well as restart any indexes that were not completed.
"vacuum_indexes": Action(
function="VacuumIndexes",
usage="%prog [options] vacuum_indexes <directory>",
options=_VacuumIndexesOptions,
short_desc="Delete unused indexes from application.",
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
"rollback": Action(
function="Rollback",
usage="%prog [options] rollback <directory>",
short_desc="Rollback an in-progress update.",
long_desc="""
The 'update' command requires a server-side transaction. Use 'rollback'
if you get an error message about another transaction being in progress
and you are sure that there is no such transaction."""),
"request_logs": Action(
function="RequestLogs",
usage="%prog [options] request_logs <directory> <output_file>",
options=_RequestLogsOptions,
short_desc="Write request logs in Apache common log format.",
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If output_file is '-', the logs are written to stdout."""),
"cron_info": Action(
function="CronInfo",
usage="%prog [options] cron_info <directory>",
options=_CronInfoOptions,
short_desc="Display information about cron jobs.",
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
"upload_data": Action(
function="PerformUpload",
usage="%prog [options] upload_data <directory>",
options=_PerformUploadOptions,
        short_desc="Upload CSV records to the datastore.",
long_desc="""
The 'upload_data' command translates CSV records into datastore entities and
uploads them into your application's datastore."""),
}
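  # Registering an additional command is a matter of defining a method on
  # AppCfg and adding an entry above; a hypothetical sketch:
  #
  #   def Status(self):
  #     """Prints the application id of the app in the given directory."""
  #     appyaml = self._ParseAppYaml(self.args[0])
  #     print >>sys.stdout, appyaml.application
  #
  #   actions["status"] = Action(
  #       function="Status",
  #       usage="%prog [options] status <directory>",
  #       short_desc="Print the application id.")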
def main(argv):
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
try:
result = AppCfgApp(argv).Run()
if result:
sys.exit(result)
except KeyboardInterrupt:
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
olea/PyConES-2016 | pycones/schedule/migrations/0007_auto_20150930_1149.py | 2 | 4593 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
class Migration(migrations.Migration):
dependencies = [
('schedule', '0006_slotkind_plenary'),
]
operations = [
migrations.AlterField(
model_name='presentation',
name='abstract',
field=markupfield.fields.MarkupField(rendered_field=True, default='', blank=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_ca',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_en',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_es',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_eu',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_gl',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='abstract_markup_type',
field=models.CharField(blank=True, default=None, choices=[('', '--'), ('markdown', 'markdown'), ('ReST', 'ReST')], max_length=30),
),
migrations.AlterField(
model_name='presentation',
name='description',
field=markupfield.fields.MarkupField(rendered_field=True, default='', blank=True),
),
migrations.AlterField(
model_name='presentation',
name='description_ca',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='description_en',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='description_es',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='description_eu',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='description_gl',
field=markupfield.fields.MarkupField(null=True, default='', blank=True, rendered_field=True),
),
migrations.AlterField(
model_name='presentation',
name='description_markup_type',
field=models.CharField(blank=True, default=None, choices=[('', '--'), ('markdown', 'markdown'), ('ReST', 'ReST')], max_length=30),
),
migrations.AlterField(
model_name='presentation',
name='title',
field=models.CharField(default='', blank=True, max_length=100),
),
migrations.AlterField(
model_name='presentation',
name='title_ca',
field=models.CharField(null=True, default='', blank=True, max_length=100),
),
migrations.AlterField(
model_name='presentation',
name='title_en',
field=models.CharField(null=True, default='', blank=True, max_length=100),
),
migrations.AlterField(
model_name='presentation',
name='title_es',
field=models.CharField(null=True, default='', blank=True, max_length=100),
),
migrations.AlterField(
model_name='presentation',
name='title_eu',
field=models.CharField(null=True, default='', blank=True, max_length=100),
),
migrations.AlterField(
model_name='presentation',
name='title_gl',
field=models.CharField(null=True, default='', blank=True, max_length=100),
),
]
| mit |
jyotikamboj/container | django/contrib/gis/tests/geoapp/test_sitemaps.py | 8 | 3286 | from __future__ import unicode_literals
from io import BytesIO
from xml.dom import minidom
import zipfile
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.sites.models import Site
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings, skipUnlessDBFeature
)
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOS:
from .models import City, Country
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.sites', 'django.contrib.sitemaps']})
@override_settings(ROOT_URLCONF='django.contrib.gis.tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoSitemapTest(TestCase):
def setUp(self):
super(GeoSitemapTest, self).setUp()
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_geositemap_kml(self):
"Tests KML/KMZ geographic sitemaps."
for kml_type in ('kml', 'kmz'):
# The URL for the sitemaps in urls.py have been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.gis.sitemaps.views.(kml|kmz)', we need
# to silence the erroneous warning until reversing by dotted
# path is removed. The test will work without modification when
# it's removed.
doc = minidom.parseString(self.client.get('/sitemaps/%s.xml' % kml_type).content)
# Ensuring the right sitemaps namespace is present.
urlset = doc.firstChild
self.assertEqual(urlset.getAttribute('xmlns'), 'http://www.sitemaps.org/schemas/sitemap/0.9')
urls = urlset.getElementsByTagName('url')
self.assertEqual(2, len(urls)) # Should only be 2 sitemaps.
for url in urls:
self.assertChildNodes(url, ['loc'])
# Getting the relative URL since we don't have a real site.
kml_url = url.getElementsByTagName('loc')[0].childNodes[0].data.split('http://example.com')[1]
if kml_type == 'kml':
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == 'kmz':
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
zf = zipfile.ZipFile(buf)
self.assertEqual(1, len(zf.filelist))
self.assertEqual('doc.kml', zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read('doc.kml'))
# Ensuring the correct number of placemarks are in the KML doc.
if 'city' in kml_url:
model = City
elif 'country' in kml_url:
model = Country
self.assertEqual(model.objects.count(), len(kml_doc.getElementsByTagName('Placemark')))
| mit |
jhonnyam123/hangoutsbot | hangupsbot/plugins/xkcd.py | 3 | 1350 | import aiohttp, asyncio, io, logging, os, re, urllib.request, urllib.error
from bs4 import BeautifulSoup
import plugins
logger = logging.getLogger(__name__)
def _initialise(bot):
plugins.register_handler(_watch_xkcd_link, type="message")
@asyncio.coroutine
def _watch_xkcd_link(bot, event, command):
if event.user.is_self:
return
if " " in event.text:
return
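    # The pattern below is intended to match a message that is nothing but a
    # bare xkcd link, e.g. "http://xkcd.com", "https://www.xkcd.com/" or
    # "https://xkcd.com/353/".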
    if re.match(r"^https?://(www\.)?xkcd\.com(/([0-9]+/)?)?$", event.text.lower(), re.IGNORECASE):
url = event.text.lower()
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError as e:
logger.info("Tried and failed to get the xkcd comic :(")
logger.info(e.read())
return
body = response.read()
soup = BeautifulSoup(body.decode("utf-8"), "lxml")
        comic = soup.find(src=re.compile(r'//imgs\.xkcd\.com/comics/.+'))
alttext = comic.attrs['title']
imgurl = comic.attrs['src']
title = comic.attrs['alt']
link_image = "http:{}".format(imgurl)
filename = os.path.basename(link_image)
r = yield from aiohttp.request('get', link_image)
raw = yield from r.read()
image_data = io.BytesIO(raw)
image_id = yield from bot._client.upload_image(image_data, filename=filename)
yield from bot.coro_send_message(event.conv, "<b><u>{}</u></b><br>{}".format(title, alttext), image_id=image_id)
| agpl-3.0 |
PySCeS/PyscesToolbox | psctb/analyse/_symca/ccobjects.py | 1 | 18252 | from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from numpy import array, nanmin, nanmax
from sympy import Symbol
from pysces import ModelMap, Scanner, ParScanner
from numpy import NaN, abs
from ...utils.model_graph import ModelGraph
from ...utils.misc import silence_print, DotDict, formatter_factory, \
do_safe_state, find_min, find_max, get_value, stringify, \
scanner_range_setup
from ...utils.plotting import Data2D
def cctype(obj):
return 'ccobjects' in str(type(obj))
@silence_print
def get_state(mod, do_state=False):
if do_state:
mod.doState()
ss = [getattr(mod, 'J_' + r) for r in mod.reactions] + \
[getattr(mod, s + '_ss') for s in mod.species]
return ss
class CCBase(object):
"""The base object for the control coefficients and control patterns"""
def __init__(self, mod, name, expression, ltxe):
super(CCBase, self).__init__()
self.expression = expression
self.mod = mod
self._ltxe = ltxe
self.name = name
self._latex_name = '\\Sigma'
self._analysis_method = 'symca'
self._str_expression_ = None
self._value = None
self._latex_expression = None
@property
def latex_expression(self):
if not self._latex_expression:
self._latex_expression = self._ltxe.expression_to_latex(
self.expression
)
return self._latex_expression
@property
def latex_name(self):
return self._latex_name
@property
def _str_expression(self):
if not self._str_expression_:
self._str_expression_ = str(self.expression)
return self._str_expression_
@property
def value(self):
"""The value property. Calls self._calc_value() when self._value
is None and returns self._value"""
self._calc_value()
return self._value
def _repr_latex_(self):
return '$%s = %s = %.3f$' % (self.latex_name,
self.latex_expression,
self.value)
def _calc_value(self):
"""Calculates the value of the expression"""
keys = self.expression.atoms(Symbol)
subsdict = {}
for key in keys:
str_key = str(key)
subsdict[str_key] = getattr(self.mod, str_key)
self._value = get_value(self._str_expression, subsdict)
def __repr__(self):
return self.expression.__repr__()
def __add__(self, other):
if cctype(other):
return self.expression.__add__(other.expression)
else:
return self.expression.__add__(other)
def __mul__(self, other):
if cctype(other):
return self.expression.__mul__(other.expression)
else:
return self.expression.__mul__(other)
def __div__(self, other):
if cctype(other):
return self.expression.__div__(other.expression)
else:
return self.expression.__div__(other)
def __pow__(self, other):
if cctype(other):
return self.expression.__pow__(other.expression)
else:
return self.expression.__pow__(other)
class CCoef(CCBase):
"""The object the stores control coefficients. Inherits from CCBase"""
def __init__(self, mod, name, expression, denominator, ltxe):
super(CCoef, self).__init__(mod, name, expression, ltxe)
self.numerator = expression
self.denominator = denominator.expression
self.expression = self.numerator / denominator.expression
self.denominator_object = denominator
self._latex_numerator = None
self._latex_expression_full = None
self._latex_expression = None
self._latex_name = None
self._abs_value = None
self.control_patterns = None
self._set_control_patterns()
@property
def abs_value(self):
self._calc_abs_value()
return self._abs_value
@property
def latex_numerator(self):
if not self._latex_numerator:
self._latex_numerator = self._ltxe.expression_to_latex(
self.numerator
)
return self._latex_numerator
@property
def latex_expression_full(self):
if not self._latex_expression_full:
full_expr = '\\frac{' + self.latex_numerator + '}{' \
+ self.denominator_object.latex_expression + '}'
self._latex_expression_full = full_expr
return self._latex_expression_full
@property
def latex_expression(self):
if not self._latex_expression:
self._latex_expression = '(' + \
self.latex_numerator + ')' + '/~\\Sigma'
return self._latex_expression
@property
def latex_name(self):
if not self._latex_name:
self._latex_name = self._ltxe.expression_to_latex(
self.name
)
return self._latex_name
def _perscan_legacy(self, parameter, scan_range):
scan_res = [list()
for i in range(len(list(self.control_patterns.values())) + 1)]
scan_res[0] = scan_range
for parvalue in scan_range:
state_valid = do_safe_state(
self.mod, parameter, parvalue, type='mca')
cc_abs_value = 0
for i, cp in enumerate(self.control_patterns.values()):
if state_valid:
cp_abs = abs(cp.value)
scan_res[i + 1].append(cp_abs)
cc_abs_value += cp_abs
else:
scan_res[i + 1].append(NaN)
for i, cp in enumerate(self.control_patterns.values()):
if state_valid:
scan_res[i + 1][-1] = (scan_res[i + 1]
[-1] / cc_abs_value) * 100
return scan_res
def _valscan_legacy(self, parameter, scan_range):
control_pattern_range = list(range(len(list(self.control_patterns.values())) + 2))
scan_res = [list() for i in control_pattern_range]
scan_res[0] = scan_range
for parvalue in scan_range:
state_valid = do_safe_state(self.mod,
parameter,
parvalue,
type='mca')
cc_value = 0
for i, cp in enumerate(self.control_patterns.values()):
if state_valid:
cp_value = cp.value
scan_res[i + 1].append(cp_value)
cc_value += cp_value
else:
scan_res[i + 1].append(NaN)
if state_valid:
scan_res[i + 2].append(cc_value)
else:
scan_res[i + 2].append(NaN)
return scan_res
def _perscan(self,
parameter,
scan_range,
par_scan=False,
par_engine='multiproc'):
val_scan_res = self._valscan(parameter,
scan_range,
par_scan,
par_engine)
points = len(scan_range)
parameter = val_scan_res[:, 0].reshape(points, 1)
cp_abs_vals = np.abs(val_scan_res[:, 1:-1])
cp_abs_sum = np.sum(cp_abs_vals, 1).reshape(points, 1)
cp_abs_perc = (cp_abs_vals / cp_abs_sum) * 100
scan_res = np.hstack([parameter, cp_abs_perc])
return scan_res
def _valscan(self,
parameter,
scan_range,
par_scan=False,
par_engine='multiproc'):
needed_symbols = [parameter] + \
stringify(list(self.expression.atoms(Symbol)))
# This is experimental
if par_scan:
scanner = ParScanner(self.mod, par_engine)
else:
scanner = Scanner(self.mod)
scanner.quietRun = True
start, end, points, log = scanner_range_setup(scan_range)
scanner.addScanParameter(parameter,
start=start,
end=end,
points=points,
log=log)
scanner.addUserOutput(*needed_symbols)
scanner.Run()
subs_dict = {}
for i, symbol in enumerate(scanner.UserOutputList):
subs_dict[symbol] = scanner.UserOutputResults[:, i]
control_pattern_names = list(self.control_patterns.keys())
denom_expr = str(self.denominator)
cp_numerators = [self.control_patterns[cp_name].numerator for
cp_name in control_pattern_names]
column_exprs = stringify(cp_numerators)
parameter = subs_dict[parameter].reshape(points, 1)
scan_res = []
denom_val = get_value(denom_expr, subs_dict)
for expr in column_exprs:
scan_res.append(get_value(expr, subs_dict) / denom_val)
scan_res = np.array(scan_res).transpose()
cc_vals = np.sum(scan_res, 1).reshape(points, 1)
scan_res = np.hstack([parameter, scan_res, cc_vals])
return scan_res
def do_par_scan(self,
parameter,
scan_range,
scan_type='percentage',
init_return=True,
par_scan=False,
par_engine='multiproc',
force_legacy=False):
assert scan_type in ['percentage', 'value']
init = getattr(self.mod, parameter)
column_names = [parameter] + \
[cp.name for cp in list(self.control_patterns.values())]
if scan_type == 'percentage':
y_label = 'Control pattern percentage contribution'
try:
assert not force_legacy, 'Legacy scan requested'
scan_res = self._perscan(parameter,
scan_range,
par_scan,
par_engine)
data_array = scan_res
except Exception as exception:
print('The parameter scan yielded the following error:')
print(exception)
print('Switching over to slower scan method and replacing')
print('invalid steady states with NaN values.')
scan_res = self._perscan_legacy(parameter, scan_range)
data_array = array(scan_res, dtype=np.float).transpose()
ylim = [nanmin(data_array[:, 1:]), nanmax(data_array[:, 1:]) * 1.1]
elif scan_type == 'value':
column_names = column_names + [self.name]
y_label = 'Control coefficient/pattern value'
try:
assert not force_legacy, 'Legacy scan requested'
scan_res = self._valscan(parameter,
scan_range,
par_scan,
par_engine)
data_array = scan_res
except Exception as exception:
print('The parameter scan yielded the following error:')
print(exception)
print('Switching over to slower scan method and replacing')
print('invalid steady states with NaN values.')
scan_res = self._valscan_legacy(parameter, scan_range)
data_array = array(scan_res, dtype=np.float).transpose()
ylim = [nanmin(data_array[:, 1:]), nanmax(data_array[:, 1:]) * 1.1]
# print data_array.shape
if init_return:
self.mod.SetQuiet()
setattr(self.mod, parameter, init)
self.mod.doMca()
self.mod.SetLoud()
mm = ModelMap(self.mod)
species = mm.hasSpecies()
if parameter in species:
x_label = '[%s]' % parameter.replace('_', ' ')
else:
x_label = parameter
ax_properties = {'ylabel': y_label,
'xlabel': x_label,
'xscale': 'linear',
'yscale': 'linear',
'xlim': [find_min(scan_range), find_max(scan_range)],
'ylim': ylim}
data = Data2D(mod=self.mod,
column_names=column_names,
data_array=data_array,
ltxe=self._ltxe,
analysis_method='symca',
ax_properties=ax_properties,
file_name=self.name)
return data
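    # Illustrative use (parameter name is hypothetical): for a control
    # coefficient object ``cc`` of a loaded model, e.g.
    #
    #   data = cc.do_par_scan('Vmax1', numpy.linspace(0.1, 10, 100),
    #                         scan_type='percentage')
    #
    # returns a Data2D object bundling the scan columns with axis properties.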
def _calc_abs_value(self):
"""Calculates the absolute numeric value of the control coefficient from the
values of its control patterns."""
keys = self.expression.atoms(Symbol)
subsdict = {}
if len(keys) == 0:
subsdict = None
for key in keys:
str_key = str(key)
subsdict[str_key] = getattr(self.mod, str_key)
for pattern in list(self.control_patterns.values()):
pattern._calc_value(subsdict)
self._abs_value = sum(
[abs(pattern._value) for pattern in list(self.control_patterns.values())])
def _calc_value(self):
"""Calculates the numeric value of the control coefficient from the
values of its control patterns."""
keys = self.expression.atoms(Symbol)
subsdict = {}
if len(keys) == 0:
subsdict = None
for key in keys:
str_key = str(key)
subsdict[str_key] = getattr(self.mod, str_key)
for pattern in list(self.control_patterns.values()):
pattern._calc_value(subsdict)
self._value = sum(
[pattern._value for pattern in list(self.control_patterns.values())])
def _set_control_patterns(self):
"""Divides control coefficient into control patterns and saves
        results in self.CPx, where x is the number of the control pattern
        as it appears in the control coefficient expression."""
patterns = self.numerator.as_coeff_add()[1]
if len(patterns) == 0:
patterns = [self.numerator.as_coeff_add()[0]]
cps = DotDict()
cps._make_repr('v.name', 'v.value', formatter_factory())
for i, pattern in enumerate(patterns):
name = 'CP{:3}'.format(i + 1).replace(' ', '0')
cp = CPattern(self.mod,
name,
pattern,
self.denominator_object,
self,
self._ltxe)
setattr(self, name, cp)
cps[name] = cp
self.control_patterns = cps
# assert self._check_control_patterns == True
def _check_control_patterns(self):
"""Checks that all control patterns are either positive or negative"""
all_same = False
poscomp = [i.value > 0 for i in list(self.control_patterns.values())]
negcomp = [i.value < 0 for i in list(self.control_patterns.values())]
if all(poscomp):
all_same = True
elif all(negcomp):
all_same = True
return all_same
def highlight_patterns(self, width=None, height=None,
show_dummy_sinks=False,
show_external_modifier_links=False,
pos_dic=None):
mg = ModelGraph(mod=self.mod, pos_dic=pos_dic,
analysis_method=self._analysis_method)
if height:
mg.height = height
if width:
mg.width = width
mg.highlight_cc(self, show_dummy_sinks, show_external_modifier_links)
class CPattern(CCBase):
"""docstring for CPattern"""
def __init__(self,
mod,
name,
expression,
denominator,
parent,
ltxe):
super(CPattern, self).__init__(mod,
name,
expression,
ltxe)
self.numerator = expression
self.denominator = denominator.expression
self.expression = self.numerator / denominator.expression
self.denominator_object = denominator
self.parent = parent
self._latex_numerator = None
self._latex_expression_full = None
self._latex_expression = None
self._latex_name = None
self._percentage = None
def _calc_value(self, subsdict=None):
"""Calculates the value of the expression"""
if subsdict is None:
keys = self.expression.atoms(Symbol)
subsdict = {}
for key in keys:
str_key = str(key)
subsdict[str_key] = getattr(self.mod, str_key)
self._value = get_value(self._str_expression, subsdict)
@property
def latex_numerator(self):
if not self._latex_numerator:
self._latex_numerator = self._ltxe.expression_to_latex(
self.numerator
)
return self._latex_numerator
@property
def latex_expression_full(self):
if not self._latex_expression_full:
full_expr = '\\frac{' + self.latex_numerator + '}{' \
+ self.denominator_object.latex_expression + '}'
self._latex_expression_full = full_expr
return self._latex_expression_full
@property
def latex_expression(self):
if not self._latex_expression:
self._latex_expression = self.latex_numerator + '/~\\Sigma'
return self._latex_expression
@property
def latex_name(self):
if not self._latex_name:
self._latex_name = self.name
return self._latex_name
@property
def percentage(self):
self._percentage = (abs(self.value) / self.parent.abs_value) * 100
return self._percentage
| bsd-3-clause |
alexsmx/djangoAppengineSrcTemplate | django/db/backends/postgresql_psycopg2/base.py | 239 | 8346 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.safestring import SafeUnicode, SafeString
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With psycopg2, cursor objects have a "query" attribute that is the
# exact query sent to the database. See docs here:
# http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
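    # return_insert_id() supplies the clause Django appends to INSERT
    # statements on PostgreSQL >= 8.2 so the new primary key comes back in
    # the same round trip; the resulting SQL looks like (illustrative):
    #
    #   INSERT INTO "app_model" ("name") VALUES ('x') RETURNING "app_model"."id"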
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
# FIXME: Eventually we're enable this by default for
# versions that support it, but, right now, that's hard to
# do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
| bsd-3-clause |
xiandiancloud/edxplaltfom-xusong | common/lib/extract_tar.py | 40 | 2149 | """
Safe version of tarfile.extractall which does not extract any files that would
be, or symlink to a file that is, outside of the directory extracted in.
Adapted from:
http://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python
"""
from os.path import abspath, realpath, dirname, join as joinpath
from django.core.exceptions import SuspiciousOperation
import logging
log = logging.getLogger(__name__) # pylint: disable=C0103
def resolved(rpath):
"""
Returns the canonical absolute path of `rpath`.
"""
return realpath(abspath(rpath))
def _is_bad_path(path, base):
"""
Is (the canonical absolute path of) `path` outside `base`?
"""
return not resolved(joinpath(base, path)).startswith(base)
def _is_bad_link(info, base):
"""
    Does the file sym- or hard-link to files outside `base`?
"""
# Links are interpreted relative to the directory containing the link
tip = resolved(joinpath(base, dirname(info.name)))
return _is_bad_path(info.linkname, base=tip)
def safemembers(members):
"""
Check that all elements of a tar file are safe.
"""
base = resolved(".")
for finfo in members:
if _is_bad_path(finfo.name, base):
log.debug("File %r is blocked (illegal path)", finfo.name)
raise SuspiciousOperation("Illegal path")
elif finfo.issym() and _is_bad_link(finfo, base):
log.debug( "File %r is blocked: Hard link to %r", finfo.name, finfo.linkname)
raise SuspiciousOperation("Hard link")
elif finfo.islnk() and _is_bad_link(finfo, base):
log.debug("File %r is blocked: Symlink to %r", finfo.name,
finfo.linkname)
raise SuspiciousOperation("Symlink")
elif finfo.isdev():
log.debug("File %r is blocked: FIFO, device or character file",
finfo.name)
raise SuspiciousOperation("Dev file")
return members
def safetar_extractall(tarf, *args, **kwargs):
"""
Safe version of `tarf.extractall()`.
"""
return tarf.extractall(members=safemembers(tarf), *args, **kwargs)
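# Typical use (illustrative): open the archive yourself and delegate
# extraction, so SuspiciousOperation is raised before anything is written.
#
#   import tarfile
#   tar = tarfile.open("upload.tar.gz")
#   try:
#       safetar_extractall(tar, path="/tmp/import")
#   finally:
#       tar.close()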
| agpl-3.0 |
voussoir/praw | tests/unit/models/reddit/test_subreddit.py | 2 | 3857 | import pickle
import pytest
from praw.models import Subreddit, WikiPage
from ... import UnitTest
class TestSubreddit(UnitTest):
def test_equality(self):
subreddit1 = Subreddit(self.reddit,
_data={'display_name': 'dummy1', 'n': 1})
subreddit2 = Subreddit(self.reddit,
_data={'display_name': 'Dummy1', 'n': 2})
subreddit3 = Subreddit(self.reddit,
_data={'display_name': 'dummy3', 'n': 2})
assert subreddit1 == subreddit1
assert subreddit2 == subreddit2
assert subreddit3 == subreddit3
assert subreddit1 == subreddit2
assert subreddit2 != subreddit3
assert subreddit1 != subreddit3
assert 'dummy1' == subreddit1
assert subreddit2 == 'dummy1'
def test_construct_failure(self):
message = 'Either `display_name` or `_data` must be provided.'
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit)
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit, 'dummy', {'id': 'dummy'})
assert str(excinfo.value) == message
def test_fullname(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
assert subreddit.fullname == 't5_dummy'
def test_hash(self):
subreddit1 = Subreddit(self.reddit,
_data={'display_name': 'dummy1', 'n': 1})
subreddit2 = Subreddit(self.reddit,
_data={'display_name': 'Dummy1', 'n': 2})
subreddit3 = Subreddit(self.reddit,
_data={'display_name': 'dummy3', 'n': 2})
assert hash(subreddit1) == hash(subreddit1)
assert hash(subreddit2) == hash(subreddit2)
assert hash(subreddit3) == hash(subreddit3)
assert hash(subreddit1) == hash(subreddit2)
assert hash(subreddit2) != hash(subreddit3)
assert hash(subreddit1) != hash(subreddit3)
def test_pickle(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
for level in range(pickle.HIGHEST_PROTOCOL + 1):
other = pickle.loads(pickle.dumps(subreddit, protocol=level))
assert subreddit == other
def test_repr(self):
subreddit = Subreddit(self.reddit, display_name='name')
assert repr(subreddit) == 'Subreddit(display_name=\'name\')'
def test_search__params_not_modified(self):
params = {'dummy': 'value'}
subreddit = Subreddit(self.reddit, display_name='name')
generator = subreddit.search(None, params=params)
assert generator.params['dummy'] == 'value'
assert params == {'dummy': 'value'}
def test_str(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
assert str(subreddit) == 'name'
def test_submit_failure(self):
message = 'Either `selftext` or `url` must be provided.'
subreddit = Subreddit(self.reddit, display_name='name')
with pytest.raises(TypeError) as excinfo:
subreddit.submit('Cool title')
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
subreddit.submit('Cool title', selftext='a', url='b')
assert str(excinfo.value) == message
class TestSubredditWiki(UnitTest):
def test__getitem(self):
subreddit = Subreddit(self.reddit, display_name='name')
wikipage = subreddit.wiki['Foo']
assert isinstance(wikipage, WikiPage)
assert 'foo' == wikipage.name
| gpl-3.0 |
CXQERP/ODOOERP | openerp/report/render/rml2txt/__init__.py | 381 | 1351 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from rml2txt import parseString, parseNode
""" This engine is the minimalistic renderer of RML documents into text files,
    using only spaces and newlines for formatting.
    It was needed for special applications where legal reports must be
    printed on dot-matrix printers.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
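# Minimal illustrative use (assuming parseString returns the rendered text):
#
#   rml_data = open('report.rml').read()
#   print parseString(rml_data)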
| agpl-3.0 |
gmt/portage | pym/_emerge/BinpkgPrefetcher.py | 8 | 1240 | # Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.BinpkgFetcher import BinpkgFetcher
from _emerge.CompositeTask import CompositeTask
from _emerge.BinpkgVerifier import BinpkgVerifier
from portage import os
class BinpkgPrefetcher(CompositeTask):
__slots__ = ("pkg",) + \
("pkg_path", "_bintree",)
def _start(self):
self._bintree = self.pkg.root_config.trees["bintree"]
fetcher = BinpkgFetcher(background=self.background,
logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
scheduler=self.scheduler)
self.pkg_path = fetcher.pkg_path
self._start_task(fetcher, self._fetcher_exit)
def _fetcher_exit(self, fetcher):
if self._default_exit(fetcher) != os.EX_OK:
self.wait()
return
verifier = BinpkgVerifier(background=self.background,
logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
scheduler=self.scheduler, _pkg_path=self.pkg_path)
self._start_task(verifier, self._verifier_exit)
def _verifier_exit(self, verifier):
if self._default_exit(verifier) != os.EX_OK:
self.wait()
return
self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
self._current_task = None
self.returncode = os.EX_OK
self.wait()
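# Flow of the composite task above: BinpkgFetcher downloads the binary
# package, BinpkgVerifier checks its integrity, and on success the file is
# injected into the local binary tree for the merge phase to consume.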
| gpl-2.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/lib-tk/test/test_ttk/test_widgets.py | 13 | 57791 | import unittest
import Tkinter as tkinter
from Tkinter import TclError
import ttk
from test.test_support import requires, run_unittest
import sys
from test_functions import MockTclObj
from support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from widget_tests import (add_standard_options, noconv, noconv_meth,
AbstractWidgetTest, StandardOptionsTests,
IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'], '')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0, 'beta', 3):
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_padding(self):
widget = self.create()
self.checkParam(widget, 'padding', 0, expected=('0',))
self.checkParam(widget, 'padding', 5, expected=('5',))
self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
self.checkParam(widget, 'padding', (5, 6, 7),
expected=('5', '6', '7'))
self.checkParam(widget, 'padding', (5, 6, 7, 8),
expected=('5', '6', '7', '8'))
self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
self.checkParam(widget, 'padding', (), expected='')
def test_style(self):
widget = self.create()
self.assertEqual(widget['style'], '')
errmsg = 'Layout Foo not found'
if hasattr(self, 'default_orient'):
errmsg = ('Layout %s.Foo not found' %
getattr(self, 'default_orient').title())
self.checkInvalidParam(widget, 'style', 'Foo',
errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
# XXX
pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
super(WidgetTest, self).setUp()
self.widget = ttk.Button(self.root, width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
self.widget.winfo_width() // 2,
self.widget.winfo_height() // 2
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
# change back to !disabled but also set active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
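# Illustrative summary (not part of the original suite): state() takes a
# sequence of state-spec strings and returns the spec that would undo the
# change, so a round trip looks like:
#   spec = widget.state(['disabled'])  # -> ('!disabled',)
#   widget.state(spec)                 # restores the previous state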
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pixels = noconv_meth
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'padding', 'relief', 'style', 'takefocus',
'width',
)
def create(self, **kwargs):
return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'labelanchor', 'labelwidget',
'padding', 'relief', 'style', 'takefocus',
'text', 'underline', 'width',
)
def create(self, **kwargs):
return ttk.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
errmsg='Bad label anchor specification {}')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = ttk.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
def checkImageParam(self, widget, name):
image = tkinter.PhotoImage(master=self.root, name='image1')
image2 = tkinter.PhotoImage(master=self.root, name='image2')
self.checkParam(widget, name, image, expected=('image1',))
self.checkParam(widget, name, 'image1', expected=('image1',))
self.checkParam(widget, name, (image,), expected=('image1',))
self.checkParam(widget, name, (image, 'active', image2),
expected=('image1', 'active', 'image2'))
self.checkParam(widget, name, 'image1 active image2',
expected=('image1', 'active', 'image2'))
self.checkInvalidParam(widget, name, 'spam',
errmsg='image "spam" doesn\'t exist')
def test_compound(self):
widget = self.create()
self.checkEnumParam(widget, 'compound',
'none', 'text', 'image', 'center',
'top', 'bottom', 'left', 'right')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def test_width(self):
widget = self.create()
self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'anchor', 'background',
'class', 'compound', 'cursor', 'font', 'foreground',
'image', 'justify', 'padding', 'relief', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = noconv_meth
def create(self, **kwargs):
return ttk.Label(self.root, **kwargs)
def test_font(self):
widget = self.create()
self.checkParam(widget, 'font',
'-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor', 'default',
'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
btn = ttk.Button(self.root, command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'offvalue', 'onvalue',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(self.root, command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertFalse(str(res))
self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'exportselection', 'height',
'justify', 'postcommand', 'state', 'style',
'takefocus', 'textvariable', 'values', 'width',
)
def setUp(self):
super(ComboboxTest, self).setUp()
self.combo = self.create()
def create(self, **kwargs):
return ttk.Combobox(self.root, **kwargs)
def test_height(self):
widget = self.create()
self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
self.assertEqual(self.combo['values'],
() if tcl_version < (8, 5) else '')
check_get_current('', -1)
self.checkParam(self.combo, 'values', 'mon tue wed thur',
expected=('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
self.checkParam(self.combo, 'values', () if tcl_version < (8, 5) else '')
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'],
('1', '', '2') if self.wantobjects else
'1 {} 2')
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'],
('a b', 'a\tb', 'a\nb') if self.wantobjects else
'{a b} {a\tb} {a\nb}')
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'],
(r'a\tb', '"a"', '} {') if self.wantobjects else
r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
self.assertEqual(combo2['values'],
('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
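# A note on the escaped expectations above: '{a b}', r'a\\tb' and friends
# are ordinary Tcl list quoting. When wantobjects is false, the option
# value round-trips through the interpreter as a single Tcl list string
# rather than a Python tuple, which is exactly what the else branches
# assert.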
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'class', 'cursor',
'exportselection', 'font',
'invalidcommand', 'justify',
'show', 'state', 'style', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def setUp(self):
super(EntryTest, self).setUp()
self.entry = self.create()
def create(self, **kwargs):
return ttk.Entry(self.root, **kwargs)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
def test_bbox(self):
self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
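# Minimal stand-alone sketch of the register()/percent-substitution
# mechanism exercised above (illustrative only; assumes a usable display):
#
# import Tkinter, ttk
# root = Tkinter.Tk()
# entry = ttk.Entry(root, validate='key')
# only_letters = entry.register(lambda s: s.isalpha())
# entry['validatecommand'] = (only_letters, '%S')  # %S = text being inserted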
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height',
'orient', 'style', 'takefocus', 'width',
)
def setUp(self):
super(PanedWindowTest, self).setUp()
self.paned = self.create()
def create(self, **kwargs):
return ttk.PanedWindow(self.root, **kwargs)
def test_orient(self):
widget = self.create()
self.assertEqual(str(widget['orient']), 'vertical')
errmsg = 'attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0, 'beta', 3):
errmsg = 'Attempt to change read-only option'
self.checkInvalidParam(widget, 'orient', 'horizontal',
errmsg=errmsg)
widget2 = self.create(orient='horizontal')
self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label(self.root)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label(self.root)
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label(self.root))
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label(self.root)
child2 = ttk.Label(self.root)
child3 = ttk.Label(self.root)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# re-inserting an existing child moves it; 'end' is already child3's
# position, so the order is unchanged
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2's position shifts child2 into child's old
# slot and child into child3's old slot
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label(self.root)
self.paned.add(child)
self.assertIsInstance(self.paned.pane(0), dict)
self.assertEqual(self.paned.pane(0, weight=None),
0 if self.wantobjects else '0')
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'),
0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertNotEqual(curr_pos, self.paned.sashpos(0))
self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'value', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Radiobutton(self.root, **kwargs)
def test_value(self):
widget = self.create()
self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = tkinter.IntVar(self.root)
cbtn = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=1)
if self.wantobjects:
conv = lambda x: x
else:
conv = int
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(conv(cbtn['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(str(res), '')
self.assertLessEqual(len(success), 1)
self.assertEqual(conv(cbtn2['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'compound', 'cursor', 'direction',
'image', 'menu', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Menubutton(self.root, **kwargs)
def test_direction(self):
widget = self.create()
self.checkEnumParam(widget, 'direction',
'above', 'below', 'left', 'right', 'flush')
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(widget, name='menu')
self.checkParam(widget, 'menu', menu, conv=str)
menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'from', 'length',
'orient', 'style', 'takefocus', 'to', 'value', 'variable',
)
_conv_pixels = noconv_meth
default_orient = 'horizontal'
def setUp(self):
super(ScaleTest, self).setUp()
self.scale = self.create()
self.scale.pack()
self.scale.update()
def create(self, **kwargs):
return ttk.Scale(self.root, **kwargs)
def test_from(self):
widget = self.create()
self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
def test_to(self):
widget = self.create()
self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
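# The sentinel lists encode an exact event count: every <<RangeChanged>>
# delivery pops one element, so assertFalse(failure) means each of the
# three range-changing updates in the block fired exactly one event (a
# fourth delivery would raise IndexError from pop()).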
def test_get(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
# set restricts the max/min values according to the current range
max = conv(self.scale['to'])
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(conv(self.scale.get()), max)
min = conv(self.scale['from'])
self.scale.set(min - 1)
self.assertEqual(conv(self.scale.get()), min)
# changing the variable directly doesn't impose this limitation, though
var = tkinter.DoubleVar(self.root)
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(conv(self.scale.get()), var.get())
self.assertEqual(conv(self.scale.get()), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(conv(self.scale.get()), max + 10)
self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
# nevertheless, note that the max/min values we can get specifying
# x, y coords are the ones according to the current range
self.assertEqual(conv(self.scale.get(0, 0)), min)
self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'length',
'mode', 'maximum', 'phase',
'style', 'takefocus', 'value', 'variable',
)
_conv_pixels = noconv_meth
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Progressbar(self.root, **kwargs)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
def test_maximum(self):
widget = self.create()
self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
def test_mode(self):
widget = self.create()
self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
def test_phase(self):
# XXX
pass
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
conv=False)
@unittest.skipIf(sys.platform == 'darwin',
'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
)
default_orient = 'vertical'
def create(self, **kwargs):
return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
)
def setUp(self):
super(NotebookTest, self).setUp()
self.nb = self.create(padding=0)
self.child1 = ttk.Label(self.root)
self.child2 = ttk.Label(self.root)
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def create(self, **kwargs):
return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label(self.root)
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
# verify that the tab gets re-added at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label(self.root)
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(tkinter.TclError, self.nb.tab, -1)
self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(tkinter.TclError, self.nb.tab, None)
self.assertIsInstance(self.nb.tab(self.child1), dict)
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
self.nb.focus_force()
simulate_mouse_click(self.nb, 5, 5)
if sys.platform == 'darwin':
self.nb.event_generate('<Option-a>')
else:
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
def setUp(self):
super(TreeviewTest, self).setUp()
self.tv = self.create(padding=0)
def create(self, **kwargs):
return ttk.Treeview(self.root, **kwargs)
def test_columns(self):
widget = self.create()
self.checkParam(widget, 'columns', 'a b c',
expected=('a', 'b', 'c'))
self.checkParam(widget, 'columns', ('a', 'b', 'c'))
self.checkParam(widget, 'columns', () if tcl_version < (8, 5) else '')
def test_displaycolumns(self):
widget = self.create()
widget['columns'] = ('a', 'b', 'c')
self.checkParam(widget, 'displaycolumns', 'b a c',
expected=('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', '#all',
expected=('#all',))
self.checkParam(widget, 'displaycolumns', (2, 1, 0))
self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
errmsg='Invalid column index d')
self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
errmsg='Column index 3 out of bounds')
self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
errmsg='Column index -2 out of bounds')
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
def test_selectmode(self):
widget = self.create()
self.checkEnumParam(widget, 'selectmode',
'none', 'browse', 'extended')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', 'tree headings',
expected=('tree', 'headings'))
self.checkParam(widget, 'show', ('tree', 'headings'))
self.checkParam(widget, 'show', ('headings', 'tree'))
self.checkParam(widget, 'show', 'tree', expected=('tree',))
self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertIsBoundingBox(bbox)
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
if not self.wantobjects:
root_width = int(root_width)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertIsInstance(self.tv.get_children(), tuple)
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
# child3 has child2 as parent, thus trying to set child2 as a child
# of child3 should result in an error
self.assertRaises(tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertIsInstance(self.tv.column('#0'), dict)
# return a single value of the given option
if self.wantobjects:
self.assertIsInstance(self.tv.column('#0', width=None), int)
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'),
10 if self.wantobjects else '10')
self.assertEqual(self.tv.column('#0', width=None),
10 if self.wantobjects else '10')
# check read-only option
self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# move a child to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
# try focusing a nonexistent item
self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertIsInstance(self.tv.heading('#0'), dict)
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
anchor=1)
def test_heading_callback(self):
def simulate_heading_click(x, y):
simulate_mouse_click(self.tv, x, y)
self.tv.update()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = u'\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'),
(value,) if self.wantobjects else value)
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.assertIsInstance(self.tv.item(item), dict)
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None),
('1', '2', value) if self.wantobjects else
'1 2 %s' % value)
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None),
('1', '2') if self.wantobjects else '1 2')
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)) if self.wantobjects else
'{a b c} {%s %s}' % (value, value))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None),
('a', 'a') if self.wantobjects else 'a a')
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None),
('b', 'a') if self.wantobjects else 'b a')
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'),
123 if self.wantobjects else '123')
self.assertEqual(self.tv.item(item, values=None),
(123, 'a') if self.wantobjects else '123 a')
self.assertEqual(self.tv.set(item),
{'B': 123} if self.wantobjects else {'B': '123'})
# nonexistent column
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
# nonexistent item
self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
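# The probe above walks down the widget in 10px steps until identify_row()
# has reported both inserted items, so the simulated clicks below land on
# real rows regardless of the active theme's row height.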
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertIsInstance(self.tv.tag_configure('test'), dict)
def test_tag_has(self):
item1 = self.tv.insert('', 'end', text='Item 1', tags=['tag1'])
item2 = self.tv.insert('', 'end', text='Item 2', tags=['tag2'])
self.assertRaises(TypeError, self.tv.tag_has)
self.assertRaises(TclError, self.tv.tag_has, 'tag1', 'non-existing')
self.assertTrue(self.tv.tag_has('tag1', item1))
self.assertFalse(self.tv.tag_has('tag1', item2))
self.assertFalse(self.tv.tag_has('tag2', item1))
self.assertTrue(self.tv.tag_has('tag2', item2))
self.assertFalse(self.tv.tag_has('tag3', item1))
self.assertFalse(self.tv.tag_has('tag3', item2))
self.assertEqual(self.tv.tag_has('tag1'), (item1,))
self.assertEqual(self.tv.tag_has('tag2'), (item2,))
self.assertEqual(self.tv.tag_has('tag3'), ())
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'style', 'takefocus',
# 'state'?
)
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'style', 'takefocus',
# 'state'?
)
def create(self, **kwargs):
return ttk.Sizegrip(self.root, **kwargs)
tests_gui = (
ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
NotebookTest, PanedWindowTest, ProgressbarTest,
RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
run_unittest(*tests_gui)
| gpl-2.0 |
codesparkle/youtube-dl | youtube_dl/extractor/addanime.py | 29 | 3281 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
qualities,
)
class AddAnimeIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
_TESTS = [{
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
'md5': '72954ea10bc979ab5e2eb288b21425a0',
'info_dict': {
'id': '24MR3YO5SAS9',
'ext': 'mp4',
'description': 'One Piece 606',
'title': 'One Piece 606',
}
}, {
'url': 'http://add-anime.net/video/MDUGWYKNGBD8/One-Piece-687',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
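# The page may be fronted by a Cloudflare-style JavaScript challenge:
# on HTTP 503 the response embeds a small arithmetic task whose answer,
# plus the length of the hostname, has to be submitted back (as
# jschl_answer) before the real page is served. The except branch below
# solves it inline.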
try:
webpage = self._download_webpage(url, video_id)
except ExtractorError as ee:
if not isinstance(ee.cause, compat_HTTPError) or \
ee.cause.code != 503:
raise
redir_webpage = ee.cause.read().decode('utf-8')
action = self._search_regex(
r'<form id="challenge-form" action="([^"]+)"',
redir_webpage, 'Redirect form')
vc = self._search_regex(
r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
redir_webpage, 'redirect vc value')
av = re.search(
r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
redir_webpage)
if av is None:
raise ExtractorError('Cannot find redirect math task')
av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
parsed_url = compat_urllib_parse_urlparse(url)
av_val = av_res + len(parsed_url.netloc)
confirm_url = (
parsed_url.scheme + '://' + parsed_url.netloc +
action + '?' +
compat_urllib_parse_urlencode({
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
self._download_webpage(
confirm_url, video_id,
note='Confirming after redirect')
webpage = self._download_webpage(url, video_id)
FORMATS = ('normal', 'hq')
quality = qualities(FORMATS)
formats = []
for format_id in FORMATS:
rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
video_url = self._search_regex(rex, webpage, 'video file URL',
fatal=False)
if not video_url:
continue
formats.append({
'format_id': format_id,
'url': video_url,
'quality': quality(format_id),
})
self._sort_formats(formats)
video_title = self._og_search_title(webpage)
video_description = self._og_search_description(webpage)
return {
'_type': 'video',
'id': video_id,
'formats': formats,
'title': video_title,
'description': video_description
}
| unlicense |
irees/emdash | emdash/ui/Ui_Wizard.py | 1 | 1634 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_Wizard.ui'
#
# Created: Tue Jul 31 04:19:55 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Wizard(object):
def setupUi(self, Wizard):
Wizard.setObjectName(_fromUtf8("Wizard"))
Wizard.resize(329, 355)
self.verticalLayout = QtGui.QVBoxLayout(Wizard)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_help = QtGui.QLabel(Wizard)
self.label_help.setText(_fromUtf8(""))
self.label_help.setTextFormat(QtCore.Qt.RichText)
self.label_help.setWordWrap(True)
self.label_help.setOpenExternalLinks(True)
self.label_help.setObjectName(_fromUtf8("label_help"))
self.verticalLayout.addWidget(self.label_help)
self.line = QtGui.QFrame(Wizard)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout.addWidget(self.line)
self.layout = QtGui.QVBoxLayout()
self.layout.setObjectName(_fromUtf8("layout"))
self.verticalLayout.addLayout(self.layout)
self.retranslateUi(Wizard)
QtCore.QMetaObject.connectSlotsByName(Wizard)
def retranslateUi(self, Wizard):
Wizard.setWindowTitle(QtGui.QApplication.translate("Wizard", "WizardPage", None, QtGui.QApplication.UnicodeUTF8))
| bsd-3-clause |
str4d/i2p-tools | netdb/tests/test_netdb.py | 2 | 1894 | # test_netdb.py - Test netdb.py
# Author: Chris Barry <[email protected]>
# License: MIT
# Note: this uses py.test.
import netdb, os, random
'''
def test_inspect():
netdb.inspect()
'''
def test_sha256():
assert('d2f4e10adac32aeb600c2f57ba2bac1019a5c76baa65042714ed2678844320d0' == netdb.netdb.sha256('i2p is cool', raw=False))
def test_address_valid():
invalid = netdb.netdb.Address()
valid = netdb.netdb.Address()
valid.cost = 10
valid.transport = 'SSU'
valid.options = {'host': '0.0.0.0', 'port': '1234', 'key': '', 'caps': ''}
valid.expire = 0
valid.firewalled = False
assert(valid.valid() and not invalid.valid())
def test_address_repr():
valid = netdb.netdb.Address()
valid.cost = 10
valid.transport = 'SSU'
valid.options = {'host': '0.0.0.0', 'port': '1234', 'key': '', 'caps': ''}
valid.expire = 0
valid.firewalled = False
assert(repr(valid) == 'Address: transport=SSU cost=10 expire=0 options={\'host\': \'0.0.0.0\', \'port\': \'1234\', \'key\': \'\', \'caps\': \'\'} location=None firewalled=False')
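# Caveat (editorial note, not upstream's): the expected string embeds a
# specific dict iteration order for `options`, which CPython 2 does not
# guarantee across builds or with hash randomization enabled, so this
# assertion is inherently brittle.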
# TODO: test_entry*
def test_entry_read_short():
assert(True)
def test_entry_read_mapping():
assert(True)
def test_entry_read():
assert(True)
def test_entry_read_short():
assert(True)
def test_entry_read_byte():
assert(True)
def test_entry_read_string():
assert(True)
def test_entry_init():
assert(True)
def test_entry_load():
assert(True)
def test_entry_verify():
assert(True)
def test_entry_repr():
assert(True)
def test_entry_dict():
assert(True)
# Make some garbage files and hope they break things.
def test_fuzz():
pwd = os.environ['PWD']
for i in range(1,100):
with open('{}/tests/fuzzdb/{}.dat'.format(pwd, i), 'wb') as fout:
fout.write(os.urandom(random.randint(2,400))) # replace 1024 with size_kb if not unreasonably large
# Now let's inspect the garbage.
netdb.inspect(netdb_dir='{}/fuzzdb/'.format(pwd))
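# NB (editorial): the fuzz files are written under {pwd}/tests/fuzzdb but
# inspected from {pwd}/fuzzdb -- whether both resolve to the same
# directory depends on where the test runner is invoked from.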
| mit |
kmarius/qutebrowser | tests/unit/utils/test_error.py | 4 | 3334 | # Copyright 2015-2018 Florian Bruhin (The Compiler) <[email protected]>
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.error."""
import logging
import pytest
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QMessageBox
from qutebrowser.utils import error, utils
from qutebrowser.misc import ipc
class Error(Exception):
pass
@pytest.mark.parametrize('exc, name, exc_text', [
# "builtins." stripped
(ValueError('exception'), 'ValueError', 'exception'),
(ValueError, 'ValueError', 'none'),
# "qutebrowser." stripped
(ipc.Error, 'misc.ipc.Error', 'none'),
(Error, 'test_error.Error', 'none'),
])
def test_no_err_windows(caplog, exc, name, exc_text, fake_args):
"""Test handle_fatal_exc with no_err_windows = True."""
fake_args.no_err_windows = True
try:
raise exc
except Exception as e:
with caplog.at_level(logging.ERROR):
error.handle_fatal_exc(e, fake_args, 'title', pre_text='pre',
post_text='post')
assert len(caplog.records) == 1
expected = [
'Handling fatal {} with --no-err-windows!'.format(name),
'',
'title: title',
'pre_text: pre',
'post_text: post',
'exception text: {}'.format(exc_text),
]
assert caplog.records[0].msg == '\n'.join(expected)
# This happens on Xvfb for some reason
# See https://github.com/qutebrowser/qutebrowser/issues/984
@pytest.mark.qt_log_ignore(r'^QXcbConnection: XCB error: 8 \(BadMatch\), '
r'sequence: \d+, resource id: \d+, major code: 42 '
r'\(SetInputFocus\), minor code: 0$',
r'^QIODevice::write: device not open')
@pytest.mark.parametrize('pre_text, post_text, expected', [
('', '', 'exception'),
('foo', '', 'foo: exception'),
('foo', 'bar', 'foo: exception\n\nbar'),
('', 'bar', 'exception\n\nbar'),
], ids=repr)
def test_err_windows(qtbot, qapp, fake_args, pre_text, post_text, expected):
def err_window_check():
w = qapp.activeModalWidget()
try:
qtbot.add_widget(w)
if not utils.is_mac:
assert w.windowTitle() == 'title'
assert w.icon() == QMessageBox.Critical
assert w.standardButtons() == QMessageBox.Ok
assert w.text() == expected
finally:
w.close()
fake_args.no_err_windows = False
QTimer.singleShot(0, err_window_check)
error.handle_fatal_exc(ValueError("exception"), fake_args, 'title',
pre_text=pre_text, post_text=post_text)
| gpl-3.0 |
deepmind/acme | acme/agents/tf/bcq/discrete_learning.py | 1 | 9377 | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discrete BCQ learner implementation.
As described in https://arxiv.org/pdf/1910.01708.pdf.
"""
import copy
from typing import Dict, List, Optional
from acme import core
from acme import types
from acme.adders import reverb as adders
from acme.agents.tf import bc
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf.networks import discrete as discrete_networks
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class _InternalBCQLearner(core.Learner, tf2_savers.TFSaveable):
"""Internal BCQ learner.
This implements the Q-learning component in the discrete BCQ algorithm.
"""
def __init__(
self,
network: discrete_networks.DiscreteFilteredQNetwork,
discount: float,
importance_sampling_exponent: float,
learning_rate: float,
target_update_period: int,
dataset: tf.data.Dataset,
huber_loss_parameter: float = 1.,
replay_client: Optional[reverb.TFClient] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False,
):
"""Initializes the learner.
Args:
network: BCQ network
discount: discount to use for TD updates.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
learning_rate: learning rate for the q-network update.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset: dataset to learn from, whether fixed or from a replay buffer (see
`acme.datasets.reverb.make_dataset` documentation).
huber_loss_parameter: Quadratic-linear boundary for Huber loss.
replay_client: client to replay to allow for updating priorities.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Internalise agent components (replay buffer, networks, optimizer).
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._network = network
self._q_network = network.q_network
self._target_q_network = copy.deepcopy(network.q_network)
self._optimizer = snt.optimizers.Adam(learning_rate)
self._replay_client = replay_client
# Internalise the hyperparameters.
self._discount = discount
self._target_update_period = target_update_period
self._importance_sampling_exponent = importance_sampling_exponent
self._huber_loss_parameter = huber_loss_parameter
# Learner state.
self._variables = [self._network.trainable_variables]
self._num_steps = tf.Variable(0, dtype=tf.int32)
# Internalise logging/counting objects.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner',
save_data=False)
# Create a snapshotter object.
if checkpoint:
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
else:
self._snapshotter = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Do a step of SGD and update the priorities."""
# Pull out the data needed for updates/priorities.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
keys, probs = inputs.info[:2]
with tf.GradientTape() as tape:
# Evaluate our networks.
q_tm1 = self._q_network(transitions.observation)
q_t_value = self._target_q_network(transitions.next_observation)
q_t_selector = self._network(transitions.next_observation)
# The rewards and discounts have to have the same type as network values.
r_t = tf.cast(transitions.reward, q_tm1.dtype)
r_t = tf.clip_by_value(r_t, -1., 1.)
d_t = tf.cast(transitions.discount, q_tm1.dtype) * tf.cast(
self._discount, q_tm1.dtype)
# Compute the loss.
_, extra = trfl.double_qlearning(q_tm1, transitions.action, r_t, d_t,
q_t_value, q_t_selector)
loss = losses.huber(extra.td_error, self._huber_loss_parameter)
# Get the importance weights.
importance_weights = 1. / probs # [B]
importance_weights **= self._importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
# Reweight.
loss *= tf.cast(importance_weights, loss.dtype) # [B]
loss = tf.reduce_mean(loss, axis=[0]) # []
# Do a step of SGD.
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
# Update the priorities in the replay buffer.
if self._replay_client:
priorities = tf.cast(tf.abs(extra.td_error), tf.float64)
self._replay_client.update_priorities(
table=adders.DEFAULT_PRIORITY_TABLE, keys=keys, priorities=priorities)
# Periodically update the target network.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(self._q_network.variables,
self._target_q_network.variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Compute the global norm of the gradients for logging.
global_gradient_norm = tf.linalg.global_norm(gradients)
# Compute statistics of the Q-values for logging.
max_q = tf.reduce_max(q_t_value)
min_q = tf.reduce_min(q_t_value)
mean_q, var_q = tf.nn.moments(q_t_value, [0, 1])
# Report loss & statistics for logging.
fetches = {
'gradient_norm': global_gradient_norm,
'loss': loss,
'max_q': max_q,
'mean_q': mean_q,
'min_q': min_q,
'var_q': var_q,
}
return fetches
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Snapshot and attempt to write logs.
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'target_q_network': self._target_q_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
}
class DiscreteBCQLearner(core.Learner, tf2_savers.TFSaveable):
"""Discrete BCQ learner.
This learner combines supervised BC learning and Q learning to implement the
discrete BCQ algorithm as described in https://arxiv.org/pdf/1910.01708.pdf.
"""
def __init__(self,
network: discrete_networks.DiscreteFilteredQNetwork,
dataset: tf.data.Dataset,
learning_rate: float,
counter: Optional[counting.Counter] = None,
bc_logger: Optional[loggers.Logger] = None,
bcq_logger: Optional[loggers.Logger] = None,
**bcq_learner_kwargs):
counter = counter or counting.Counter()
self._bc_logger = bc_logger or loggers.TerminalLogger('bc_learner',
time_delta=1.)
self._bcq_logger = bcq_logger or loggers.TerminalLogger('bcq_learner',
time_delta=1.)
self._bc_learner = bc.BCLearner(
network=network.g_network,
learning_rate=learning_rate,
dataset=dataset,
counter=counting.Counter(counter, 'bc'),
logger=self._bc_logger,
checkpoint=False)
self._bcq_learner = _InternalBCQLearner(
network=network,
learning_rate=learning_rate,
dataset=dataset,
counter=counting.Counter(counter, 'bcq'),
logger=self._bcq_logger,
**bcq_learner_kwargs)
def get_variables(self, names):
return self._bcq_learner.get_variables(names)
@property
def state(self):
bc_state = self._bc_learner.state
bc_state.pop('network') # No need to checkpoint the BC network.
bcq_state = self._bcq_learner.state
state = dict()
state.update({f'bc_{k}': v for k, v in bc_state.items()})
state.update({f'bcq_{k}': v for k, v in bcq_state.items()})
return state
def step(self):
self._bc_learner.step()
self._bcq_learner.step()
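# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The MLP sizes, BCQ filtering threshold and hyperparameter values
# below are placeholder assumptions, and the DiscreteFilteredQNetwork
# constructor arguments are inferred from how its `g_network`/`q_network`
# attributes are used above.
def _example_build_learner(num_actions: int,
                           dataset: tf.data.Dataset) -> DiscreteBCQLearner:
  """Sketch: wires up a DiscreteBCQLearner from a filtered Q-network."""
  network = discrete_networks.DiscreteFilteredQNetwork(
      g_network=snt.Sequential([snt.Flatten(), snt.nets.MLP([64, num_actions])]),
      q_network=snt.Sequential([snt.Flatten(), snt.nets.MLP([64, num_actions])]),
      threshold=0.3)  # Assumed action-filtering threshold.
  return DiscreteBCQLearner(
      network=network,
      dataset=dataset,
      learning_rate=1e-4,  # Placeholder hyperparameters below.
      discount=0.99,
      importance_sampling_exponent=0.2,
      target_update_period=100)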
| apache-2.0 |
gorjuce/odoo | addons/l10n_in_hr_payroll/wizard/__init__.py | 430 | 1110 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_salary_employee_bymonth
import hr_yearly_salary_detail
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
MarioGranada/bitgraytest | bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
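# Example invocation (hypothetical friendly name and paths; the bucket name
# comes from the TWBS_S3_BUCKET environment variable and credentials from
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY):
#
#   ./s3_cache.py download "npm packages" package.json ./node_modules
#   ./s3_cache.py upload   "npm packages" package.json ./node_modules
#
# The S3 key is the SHA-256 of the dependencies file, so any change to
# package.json automatically misses the stale cache entry.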
| mit |
sbuss/voteswap | lib/networkx/linalg/spectrum.py | 42 | 2793 | """
Eigenvalue spectrum of graphs.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])',
'Jean-Gabriel Young ([email protected])'])
__all__ = ['laplacian_spectrum', 'adjacency_spectrum', 'modularity_spectrum']
def laplacian_spectrum(G, weight='weight'):
"""Return eigenvalues of the Laplacian of G
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edges weights are summed.
See to_numpy_matrix for other options.
See Also
--------
laplacian_matrix
"""
from scipy.linalg import eigvalsh
return eigvalsh(nx.laplacian_matrix(G,weight=weight).todense())
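# Example (illustrative; not a doctest run by the test suite): the path
# graph on three nodes has Laplacian eigenvalues {0, 1, 3}:
#
#   >>> G = nx.path_graph(3)
#   >>> sorted(int(round(x)) for x in laplacian_spectrum(G))
#   [0, 1, 3]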
def adjacency_spectrum(G, weight='weight'):
"""Return eigenvalues of the adjacency matrix of G.
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edges weights are summed.
See to_numpy_matrix for other options.
See Also
--------
adjacency_matrix
"""
from scipy.linalg import eigvals
return eigvals(nx.adjacency_matrix(G,weight=weight).todense())
def modularity_spectrum(G):
"""Return eigenvalues of the modularity matrix of G.
Parameters
----------
G : Graph
A NetworkX Graph or DiGraph
Returns
-------
evals : NumPy array
Eigenvalues
See Also
--------
modularity_matrix
References
----------
.. [1] M. E. J. Newman, "Modularity and community structure in networks",
Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
"""
from scipy.linalg import eigvals
if G.is_directed():
return eigvals(nx.directed_modularity_matrix(G))
else:
return eigvals(nx.modularity_matrix(G))
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import scipy.linalg
except:
raise SkipTest("scipy.linalg not available")
| mit |
jumpojoy/neutron | neutron/tests/common/base.py | 34 | 3238 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import unittest.case
from oslo_db.sqlalchemy import test_base
import testtools.testcase
from neutron.common import constants as n_const
from neutron.tests import base
from neutron.tests import tools
def create_resource(prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
If prefix isn't 'max_length' in size, a random suffix is concatenated to
ensure it is random. Otherwise, 'prefix' is used as is.
:param prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
        to be created as its first argument. An error is assumed
to indicate a name collision.
:param *args *kwargs: These will be passed to the create function.
"""
# Don't generate a random name if prefix is already full-length.
if len(prefix) == n_const.DEVICE_NAME_MAX_LEN:
return creation_func(prefix, *args, **kwargs)
while True:
name = base.get_rand_name(
max_length=n_const.DEVICE_NAME_MAX_LEN,
prefix=prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
pass
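# Example (sketch): `make_bridge` below is a hypothetical creation callable.
# The only contract create_resource relies on is that the callable takes the
# resource name as its first argument and raises RuntimeError on a collision:
#
#   taken = set()
#   def make_bridge(name):
#       if name in taken:
#           raise RuntimeError(name)
#       taken.add(name)
#       return name
#   bridge_name = create_resource('test-br', make_bridge)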
def no_skip_on_missing_deps(wrapped):
"""Do not allow a method/test to skip on missing dependencies.
This decorator raises an error if a skip is raised by wrapped method when
OS_FAIL_ON_MISSING_DEPS is evaluated to True. This decorator should be used
only for missing dependencies (including missing system requirements).
"""
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
try:
return wrapped(*args, **kwargs)
except (testtools.TestCase.skipException, unittest.case.SkipTest) as e:
if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'):
tools.fail(
'%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS '
'is enabled, skip reason: %s' % (wrapped.__name__, e))
raise
return wrapper
class MySQLTestCase(test_base.MySQLOpportunisticTestCase):
"""Base test class for MySQL tests.
If the MySQL db is unavailable then this test is skipped, unless
OS_FAIL_ON_MISSING_DEPS is enabled.
"""
SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase):
"""Base test class for PostgreSQL tests.
If the PostgreSQL db is unavailable then this test is skipped, unless
OS_FAIL_ON_MISSING_DEPS is enabled.
"""
SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
| apache-2.0 |
pkainz/pylearn2 | pylearn2/linear/tests/test_conv2d.py | 45 | 5497 | import theano
from theano import tensor
import numpy
from pylearn2.linear.conv2d import Conv2D, make_random_conv2D
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
import unittest
try:
scipy_available = True
import scipy.ndimage
except ImportError:
scipy_available = False
class TestConv2D(unittest.TestCase):
"""
Tests for Conv2D code
"""
def setUp(self):
"""
Set up a test image and filter to re-use
"""
self.image = numpy.random.rand(1, 3, 3, 1).astype(theano.config.floatX)
self.image_tensor = tensor.tensor4()
self.input_space = Conv2DSpace((3, 3), 1)
self.filters_values = numpy.ones(
(1, 1, 2, 2), dtype=theano.config.floatX
)
self.filters = sharedX(self.filters_values, name='filters')
self.conv2d = Conv2D(self.filters, 1, self.input_space)
def test_value_errors(self):
"""
Check correct errors are raised when bad input is given
"""
bad_filters = sharedX(numpy.zeros((1, 3, 2)))
self.assertRaises(ValueError, Conv2D, bad_filters, 1, self.input_space)
self.assertRaises(AssertionError, Conv2D, self.filters, 0,
self.input_space)
def test_get_params(self):
"""
Check whether the conv2d has stored the correct filters
"""
assert self.conv2d.get_params() == [self.filters]
def test_lmul(self):
"""
Use SciPy's ndimage to check whether the convolution worked
correctly
"""
f = theano.function([self.image_tensor],
self.conv2d.lmul(self.image_tensor))
if scipy_available:
            assert numpy.allclose(
f(self.image).reshape((2, 2)),
scipy.ndimage.filters.convolve(
self.image.reshape((3, 3)),
self.filters_values.reshape((2, 2))
)[:2, :2]
)
def test_lmul_T(self):
"""
Check whether this function outputs the right shape
"""
conv2d = self.conv2d.lmul(self.image_tensor)
f = theano.function([self.image_tensor],
self.conv2d.lmul_T(conv2d))
assert f(self.image).shape == self.image.shape
def test_lmul_sq_T(self):
"""
Check whether this function outputs the same values as when
taking the square manually
"""
conv2d_sq = Conv2D(sharedX(numpy.square(self.filters_values)),
1, self.input_space
).lmul(self.image_tensor)
conv2d = self.conv2d.lmul(self.image_tensor)
f = theano.function([self.image_tensor],
self.conv2d.lmul_T(conv2d_sq))
f2 = theano.function([self.image_tensor],
self.conv2d.lmul_sq_T(conv2d))
numpy.testing.assert_allclose(f(self.image), f2(self.image))
def test_set_batch_size(self):
"""
Make sure that setting the batch size actually changes the property
"""
cur_img_shape = self.conv2d._img_shape
cur_batch_size = self.conv2d._img_shape[0]
self.conv2d.set_batch_size(cur_batch_size + 10)
assert self.conv2d._img_shape[0] == cur_batch_size + 10
assert self.conv2d._img_shape[1:] == cur_img_shape[1:]
def test_axes(self):
"""
Use different output axes and see whether the output is what we
expect
"""
default_axes = ('b', 0, 1, 'c')
axes = (0, 'b', 1, 'c')
mapping = tuple(axes.index(axis) for axis in default_axes)
input_space = Conv2DSpace((3, 3), num_channels=1, axes=axes)
conv2d = Conv2D(self.filters, 1, input_space, output_axes=axes)
f_axes = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
f = theano.function([self.image_tensor],
self.conv2d.lmul(self.image_tensor))
output_axes = f_axes(numpy.transpose(self.image, mapping))
output = f(self.image)
output_axes = numpy.transpose(output_axes, mapping)
numpy.testing.assert_allclose(output, output_axes)
assert output.shape == output_axes.shape
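    # Worked example of the mapping computed above: with
    # axes = (0, 'b', 1, 'c') and default_axes = ('b', 0, 1, 'c'),
    # mapping == (1, 0, 2, 3), so numpy.transpose swaps the first two
    # dimensions and ('b', 0, 1, 'c')-ordered data becomes
    # (0, 'b', 1, 'c')-ordered data.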
def test_channels(self):
"""
Go from 2 to 3 channels and see whether the shape is correct
"""
input_space = Conv2DSpace((3, 3), num_channels=3)
filters_values = numpy.ones(
(2, 3, 2, 2), dtype=theano.config.floatX
)
filters = sharedX(filters_values)
image = numpy.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
conv2d = Conv2D(filters, 1, input_space)
f = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
assert f(image).shape == (1, 2, 2, 2)
def test_make_random_conv2D(self):
"""
Create a random convolution and check whether the shape, axes and
input space are all what we expect
"""
output_space = Conv2DSpace((2, 2), 1)
conv2d = make_random_conv2D(1, self.input_space, output_space,
(2, 2), 1)
f = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
assert f(self.image).shape == (1, 2, 2, 1)
assert conv2d.input_space == self.input_space
assert conv2d.output_axes == output_space.axes
| bsd-3-clause |
robynbergeron/ansible-modules-extras | cloud/cloudstack/cs_affinitygroup.py | 24 | 7542 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_affinitygroup
short_description: Manages affinity groups on Apache CloudStack based clouds.
description:
- Create and remove affinity groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the affinity group.
required: true
affinty_type:
description:
- Type of the affinity group. If not specified, first found affinity type is used.
required: false
default: null
description:
description:
- Description of the affinity group.
required: false
default: null
state:
description:
- State of the affinity group.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the affinity group is related to.
required: false
default: null
account:
description:
- Account the affinity group is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
affinty_type: host anti-affinity
# Remove an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
state: absent
'''
RETURN = '''
---
id:
description: UUID of the affinity group.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of affinity group.
returned: success
type: string
sample: app
description:
description: Description of affinity group.
returned: success
type: string
sample: application affinity group
affinity_type:
description: Type of affinity group.
returned: success
type: string
sample: host anti-affinity
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackAffinityGroup, self).__init__(module)
self.returns = {
'type': 'affinity_type',
}
self.affinity_group = None
def get_affinity_group(self):
if not self.affinity_group:
affinity_group = self.module.params.get('name')
args = {}
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
affinity_groups = self.cs.listAffinityGroups(**args)
if affinity_groups:
for a in affinity_groups['affinitygroup']:
if affinity_group in [ a['name'], a['id'] ]:
self.affinity_group = a
break
return self.affinity_group
def get_affinity_type(self):
affinity_type = self.module.params.get('affinty_type')
affinity_types = self.cs.listAffinityGroupTypes()
if affinity_types:
if not affinity_type:
return affinity_types['affinityGroupType'][0]['type']
for a in affinity_types['affinityGroupType']:
if a['type'] == affinity_type:
return a['type']
self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)
def create_affinity_group(self):
affinity_group = self.get_affinity_group()
if not affinity_group:
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['type'] = self.get_affinity_type()
args['description'] = self.module.params.get('description')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
res = self.cs.createAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
affinity_group = self._poll_job(res, 'affinitygroup')
return affinity_group
def remove_affinity_group(self):
affinity_group = self.get_affinity_group()
if affinity_group:
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
res = self.cs.deleteAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self._poll_job(res, 'affinitygroup')
return affinity_group
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
affinty_type = dict(default=None),
description = dict(default=None),
state = dict(choices=['present', 'absent'], default='present'),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
api_key = dict(default=None),
api_secret = dict(default=None, no_log=True),
api_url = dict(default=None),
api_http_method = dict(choices=['get', 'post'], default='get'),
api_timeout = dict(type='int', default=10),
api_region = dict(default='cloudstack'),
),
required_together = (
['api_key', 'api_secret', 'api_url'],
),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_ag = AnsibleCloudStackAffinityGroup(module)
state = module.params.get('state')
if state in ['absent']:
affinity_group = acs_ag.remove_affinity_group()
else:
affinity_group = acs_ag.create_affinity_group()
result = acs_ag.get_result(affinity_group)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
erjohnso/ansible | test/units/modules/packaging/os/test_apk.py | 137 | 1157 | from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.modules.packaging.os import apk
class TestApkQueryLatest(unittest.TestCase):
def setUp(self):
self.module_names = [
'bash',
'g++',
]
@mock.patch('ansible.modules.packaging.os.apk.AnsibleModule')
def test_not_latest(self, mock_module):
apk.APK_PATH = ""
for module_name in self.module_names:
command_output = module_name + '-2.0.0-r1 < 3.0.0-r2 '
mock_module.run_command.return_value = (0, command_output, None)
command_result = apk.query_latest(mock_module, module_name)
self.assertFalse(command_result)
@mock.patch('ansible.modules.packaging.os.apk.AnsibleModule')
def test_latest(self, mock_module):
apk.APK_PATH = ""
for module_name in self.module_names:
command_output = module_name + '-2.0.0-r1 = 2.0.0-r1 '
mock_module.run_command.return_value = (0, command_output, None)
command_result = apk.query_latest(mock_module, module_name)
self.assertTrue(command_result)
| gpl-3.0 |
Javier-Acosta/meran | dev-plugins/node/lib/node/wafadmin/Logs.py | 4 | 4722 | #!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
import ansiterm
import os, re, logging, traceback, sys
from Constants import *
zones = ''
verbose = 0
colors_lst = {
'USE' : True,
'BOLD' :'\x1b[01;1m',
'RED' :'\x1b[01;31m',
'GREEN' :'\x1b[32m',
'YELLOW':'\x1b[33m',
'PINK' :'\x1b[35m',
'BLUE' :'\x1b[01;34m',
'CYAN' :'\x1b[36m',
'NORMAL':'\x1b[0m',
'cursor_on' :'\x1b[?25h',
'cursor_off' :'\x1b[?25l',
}
got_tty = False
term = os.environ.get('TERM', 'dumb')
if not term in ['dumb', 'emacs']:
try:
got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys'])
except AttributeError:
pass
import Utils
if not got_tty or 'NOCOLOR' in os.environ:
colors_lst['USE'] = False
# test
#if sys.platform == 'win32':
# colors_lst['USE'] = True
def get_color(cl):
if not colors_lst['USE']: return ''
return colors_lst.get(cl, '')
class foo(object):
def __getattr__(self, a):
return get_color(a)
def __call__(self, a):
return get_color(a)
colors = foo()
re_log = re.compile(r'(\w+): (.*)', re.M)
class log_filter(logging.Filter):
def __init__(self, name=None):
pass
def filter(self, rec):
rec.c1 = colors.PINK
rec.c2 = colors.NORMAL
rec.zone = rec.module
if rec.levelno >= logging.INFO:
if rec.levelno >= logging.ERROR:
rec.c1 = colors.RED
elif rec.levelno >= logging.WARNING:
rec.c1 = colors.YELLOW
else:
rec.c1 = colors.GREEN
return True
zone = ''
m = re_log.match(rec.msg)
if m:
zone = rec.zone = m.group(1)
rec.msg = m.group(2)
if zones:
return getattr(rec, 'zone', '') in zones or '*' in zones
elif not verbose > 2:
return False
return True
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
def format(self, rec):
if rec.levelno >= logging.WARNING or rec.levelno == logging.INFO:
try:
return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2)
except:
return rec.c1+rec.msg+rec.c2
return logging.Formatter.format(self, rec)
def debug(*k, **kw):
if verbose:
k = list(k)
k[0] = k[0].replace('\n', ' ')
logging.debug(*k, **kw)
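# Example (sketch): with `zones = ['runner']` and verbose > 0, only debug
# messages carrying a matching "zone: " prefix pass the log_filter above:
#
#   debug('runner: system command -> %r' % cmd)  # shown
#   debug('envhash: computing cache key')        # filtered out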
def error(*k, **kw):
logging.error(*k, **kw)
if verbose > 1:
if isinstance(k[0], Utils.WafError):
st = k[0].stack
else:
st = traceback.extract_stack()
if st:
st = st[:-1]
buf = []
for filename, lineno, name, line in st:
buf.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
buf.append(' %s' % line.strip())
if buf: logging.error("\n".join(buf))
warn = logging.warn
info = logging.info
def init_log():
log = logging.getLogger()
log.handlers = []
log.filters = []
hdlr = logging.StreamHandler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
# may be initialized more than once
init_log() | gpl-3.0 |
amir343/ansible | test/units/parsing/test_data_loader.py | 99 | 3256 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY2
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping
class TestDataLoader(unittest.TestCase):
def setUp(self):
# FIXME: need to add tests that utilize vault_password
self._loader = DataLoader()
def tearDown(self):
pass
@patch.object(DataLoader, '_get_file_contents')
def test_parse_json_from_file(self, mock_def):
mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True)
output = self._loader.load_from_file('dummy_json.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_yaml_from_file(self, mock_def):
mock_def.return_value = ("""
a: 1
b: 2
c: 3
""", True)
output = self._loader.load_from_file('dummy_yaml.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_fail_from_file(self, mock_def):
mock_def.return_value = ("""
TEXT:
***
NOT VALID
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
self._loader = DataLoader(vault_password='ansible')
def tearDown(self):
pass
@patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
def test_parse_from_vault_1_1_file(self):
vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
if PY2:
builtins_name = '__builtin__'
else:
builtins_name = 'builtins'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
output = self._loader.load_from_file('dummy_vault.txt')
self.assertEqual(output, dict(foo='bar'))
| gpl-3.0 |
fujunwei/chromium-crosswalk | chrome/test/ispy/server/rebaseline_handler.py | 100 | 1208 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Request Handler that updates the Expectation version."""
import json
import webapp2
import ispy_api
from common import constants
import gs_bucket
class RebaselineHandler(webapp2.RequestHandler):
"""Request handler to allow test mask updates."""
def post(self):
"""Accepts post requests.
Expects a test_run as a parameter and updates the associated version file to
use the expectations associated with that test run.
"""
test_run = self.request.get('test_run')
# Fail if test_run parameter is missing.
if not test_run:
      self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(
{'error': '\'test_run\' must be supplied to rebaseline.'}))
return
# Otherwise, set up the utilities.
bucket = gs_bucket.GoogleCloudStorageBucket(constants.BUCKET)
ispy = ispy_api.ISpyApi(bucket)
# Update versions file.
ispy.RebaselineToTestRun(test_run)
# Redirect back to the sites list for the test run.
self.redirect('/?test_run=%s' % test_run)
| bsd-3-clause |
persandstrom/home-assistant | homeassistant/components/switch/orvibo.py | 8 | 3041 | """
Support for Orvibo S20 Wifi Smart Switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.orvibo/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_SWITCHES, CONF_MAC, CONF_DISCOVERY)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['orvibo==1.1.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Orvibo S20 Switch'
DEFAULT_DISCOVERY = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SWITCHES, default=[]):
vol.All(cv.ensure_list, [{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
}]),
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
})
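# Example configuration.yaml entry matching the schema above (host and MAC
# values are placeholders):
#
#   switch:
#     - platform: orvibo
#       discovery: true
#       switches:
#         - host: 192.168.1.10
#           mac: 'ac:cf:23:xx:xx:xx'
#           name: Desk Lamp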
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up S20 switches."""
from orvibo.s20 import discover, S20, S20Exception
switch_data = {}
switches = []
switch_conf = config.get(CONF_SWITCHES, [config])
if config.get(CONF_DISCOVERY):
_LOGGER.info("Discovering S20 switches ...")
switch_data.update(discover())
for switch in switch_conf:
switch_data[switch.get(CONF_HOST)] = switch
for host, data in switch_data.items():
try:
switches.append(S20Switch(data.get(CONF_NAME),
S20(host, mac=data.get(CONF_MAC))))
_LOGGER.info("Initialized S20 at %s", host)
except S20Exception:
_LOGGER.error("S20 at %s couldn't be initialized", host)
add_entities_callback(switches)
class S20Switch(SwitchDevice):
"""Representation of an S20 switch."""
def __init__(self, name, s20):
"""Initialize the S20 device."""
from orvibo.s20 import S20Exception
self._name = name
self._s20 = s20
self._state = False
self._exc = S20Exception
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def update(self):
"""Update device state."""
try:
self._state = self._s20.on
except self._exc:
_LOGGER.exception("Error while fetching S20 state")
def turn_on(self, **kwargs):
"""Turn the device on."""
try:
self._s20.on = True
except self._exc:
_LOGGER.exception("Error while turning on S20")
def turn_off(self, **kwargs):
"""Turn the device off."""
try:
self._s20.on = False
except self._exc:
_LOGGER.exception("Error while turning off S20")
| apache-2.0 |
bluesabre/melodius | melodius/PreferencesMelodiusDialog.py | 1 | 8596 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Sean Davis <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
# This is your preferences dialog.
#
# Define your preferences in
# data/glib-2.0/schemas/net.launchpad.melodius.gschema.xml
# See http://developer.gnome.org/gio/stable/GSettings.html for more info.
from gi.repository import GObject, Gio, Gtk, Notify # pylint: disable=E0611
import locale
from locale import gettext as _
locale.textdomain('melodius')
import logging
logger = logging.getLogger('melodius')
from melodius_lib.PreferencesDialog import PreferencesDialog
from . import MelodiusLibrary
class PreferencesMelodiusDialog(PreferencesDialog):
__gtype_name__ = "PreferencesMelodiusDialog"
__gsignals__ = {
'library_updated': (GObject.SIGNAL_RUN_FIRST, None,
(bool,)),
'show_preview_notification': (GObject.SIGNAL_RUN_FIRST, None,
(bool,))
}
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the preferences dialog"""
super(PreferencesMelodiusDialog, self).finish_initializing(builder)
# Library Settings
self.library_treeview = self.builder.get_object('library_treeview')
column = self.library_treeview.get_column(0)
self.library_treeview.append_column( column)
cell = Gtk.CellRendererText()
column.pack_start(cell, True)
column.add_attribute(cell, 'text', 0)
self.library_toolbar = self.builder.get_object('library_toolbar')
context = self.library_toolbar.get_style_context()
context.add_class("inline-toolbar")
self.library_stats = self.builder.get_object('library_stats')
# Notification Settings
self.show_notifications = self.builder.get_object("checkbutton_show_notifications")
self.preview_image = self.builder.get_object("preview_image")
self.preview_primary_message = self.builder.get_object("preview_primary_message")
self.preview_secondary_message = self.builder.get_object("preview_secondary_message")
self.notification_settings = self.builder.get_object("box_notification_settings")
self.notifications_coverart = self.builder.get_object("notifications_coverart")
self.notifications_primary = self.builder.get_object("notifications_primary")
self.notifications_secondary = self.builder.get_object("notifications_secondary")
# Bind each preference widget to gsettings
self.settings = Gio.Settings("net.launchpad.melodius")
model = self.library_treeview.get_model()
for folder in self.settings['folders']:
model.append([folder])
self.library = MelodiusLibrary.MelodiusLibrary()
self.show_notifications.set_active( self.settings["show-notifications"] )
self.notifications_coverart.set_active( self.settings["show-coverart"] )
self.notifications_primary.set_text( self.settings["primary-message"] )
self.notifications_secondary.set_text( self.settings["secondary-message"] )
#widget = self.builder.get_object('example_entry')
#settings.bind("example", widget, "text", Gio.SettingsBindFlags.DEFAULT)
# Initialize notification previews
Notify.init("melodius-preview")
def on_toolbutton_library_add_clicked(self, widget):
dialog = Gtk.FileChooserDialog(title=_("Add a folder to the library"), parent=self, action=Gtk.FileChooserAction.SELECT_FOLDER, buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL,Gtk.STOCK_ADD,Gtk.ResponseType.OK))
dialog.set_select_multiple(True)
dialog.show()
response = dialog.run()
dialog.hide()
if response == Gtk.ResponseType.OK:
model = self.library_treeview.get_model()
existing = []
iter = model.get_iter_first()
while iter:
existing.append( model.get_value(iter, 0) )
iter = model.iter_next(iter)
for folder in dialog.get_filenames():
if folder not in existing:
model.append([folder])
self.library.add_folder(folder)
self.on_prefs_library_updated()
def on_toolbutton_library_remove_clicked(self, widget):
sel = self.library_treeview.get_selection()
store, path = sel.get_selected_rows()
folder = store[path][0]
iter = store.get_iter( path[0] )
store.remove(iter)
self.library.remove_folder(folder)
self.on_prefs_library_updated()
def on_prefs_library_updated(self):
model = self.library_treeview.get_model()
folders = []
iter = model.get_iter_first()
while iter:
folders.append( model.get_value(iter, 0) )
iter = model.iter_next(iter)
folders.sort()
model.clear()
for folder in folders:
model.append([folder])
self.settings['folders'] = folders
self.library = MelodiusLibrary.MelodiusLibrary()
self.library_stats.set_label(_('<i>%i songs in library. %s total playtime.</i>') % (len(self.library), '0:00:00'))
self.emit("library_updated", len(self.library))
def on_checkbutton_show_notifications_toggled(self, widget):
"""Toggle the notification settings editable"""
self.settings["show-notifications"] = widget.get_active()
self.notification_settings.set_sensitive(widget.get_active())
def on_notifications_coverart_toggled(self, widget):
self.preview_image.set_visible(widget.get_active())
self.settings["show-coverart"] = widget.get_active()
def on_notifications_primary_changed(self, widget):
"""Update the primary message preview"""
text = widget.get_text()
self.settings["primary-message"] = text
        text = text.replace("<", "&lt;").replace(">", "&gt;")
text = text.replace("%s", _("Song Title"))
text = text.replace("%a", _("Song Artist"))
text = text.replace("%l", _("Song Album"))
self.preview_primary_message.set_markup("<b>%s</b>" % text)
def on_notifications_secondary_changed(self, widget):
"""Update the secondary message preview"""
text = widget.get_text()
self.settings["secondary-message"] = text
text = text.replace("%s", _("Song Title"))
text = text.replace("%a", _("Song Artist"))
text = text.replace("%l", _("Song Album"))
self.preview_secondary_message.set_markup(text)
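    # Worked example of the substitution above: with primary-message "%s"
    # and secondary-message "by %a on %l", the preview pane renders:
    #
    #   Song Title
    #   by Song Artist on Song Album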
def on_button_preview_clicked(self, widget):
"""Show a notification preview"""
primary = self.notifications_primary.get_text()
        primary = primary.replace("<", "&lt;").replace(">", "&gt;")
primary = primary.replace("%s", _("Song Title"))
primary = primary.replace("%a", _("Song Artist"))
primary = primary.replace("%l", _("Song Album"))
secondary = self.notifications_secondary.get_text()
        secondary = secondary.replace("<", "&lt;").replace(">", "&gt;")
secondary = secondary.replace("%s", _("Song Title"))
secondary = secondary.replace("%a", _("Song Artist"))
secondary = secondary.replace("%l", _("Song Album"))
if self.notifications_coverart.get_active():
notification = Notify.Notification.new (primary,secondary,"audio-player")
else:
notification = Notify.Notification.new (primary,secondary,None)
notification.show ()
def on_notifications_revert_clicked(self, widget):
"""Revert notification settings to defaults."""
self.notifications_coverart.set_active(True)
self.notifications_primary.set_text("%s")
self.notifications_secondary.set_text("by %a on %l")
| gpl-3.0 |
corvorepack/REPOIVAN | plugin.video.tv.astra.vip/resources/regex/dinozap.py | 3 | 4487 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# TV Ultra 7K regex for Dinozap
# Version 0.1 (17.10.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
import os
import urllib
import urllib2
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools, scrapertools
import sys,traceback,urllib2,re
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
def dinozap0(params):
plugintools.log('[%s %s] Initializing Businessapp regex... %s' % (addonName, addonVersion, repr(params)))
url_user = {}
# Construimos diccionario...
url = params.get("url")
url_extracted = url.split(" ")
for entry in url_extracted:
if entry.startswith("rtmp"):
entry = entry.replace("rtmp=", "")
url_user["rtmp"]=entry
elif entry.startswith("playpath"):
entry = entry.replace("playpath=", "")
url_user["playpath"]=entry
elif entry.startswith("swfUrl"):
entry = entry.replace("swfUrl=", "")
url_user["swfurl"]=entry
elif entry.startswith("pageUrl"):
entry = entry.replace("pageUrl=", "")
url_user["pageurl"]=entry
elif entry.startswith("token"):
entry = entry.replace("token=", "")
url_user["token"]=entry
elif entry.startswith("referer"):
entry = entry.replace("referer=", "")
url_user["referer"]=entry
url = url_user.get("pageurl")
ref = 'http://www.dinozap.info/'
body='';body=gethttp_referer_headers(url,ref)
reff=url;url=plugintools.find_single_match(body,'iframe\ssrc="([^"]+)');
for i in range(1,10):
k=url;body=gethttp_referer_headers(url,reff);
scrpt='document\.write\(unescape\(\'([^\']+)';scrpt=plugintools.find_single_match(body,scrpt)
tok='securetoken([^\n]+)';tok=plugintools.find_single_match(body,tok);
try: hidd='type="hidden"\sid="([^"]+)"\svalue="([^"]*)';hidd=plugintools.find_multiple_matches(body,hidd);
except: i-=1;
diov='var\s(sUrl|cod1)\s=\s\'([^\']+)';diov=plugintools.find_multiple_matches(body,diov);#print diov;
Epoc_mil=str(int(time.time()*1000));EpocTime=str(int(time.time()));jquery = '%s?callback=jQuery17049106340911455604_%s&v_cod1=%s&v_cod2=%s&_=%s';
jurl=jquery%(hidd[3][1].decode('base64'),Epoc_mil,urllib.quote_plus(hidd[1][1]),urllib.quote_plus(hidd[2][1]),Epoc_mil);r='"result\d{1}":"([^"]+)';p='plugintools.find_multiple_matches(body,r)';
body=gethttp_referer_headers(jurl,k);x=eval(p)[0];print jurl
if x=='not_found': print 'try '+str(i)+' : '+x;
else: print 'try '+str(i)+' : OK :)';break;
    if x=='not_found': plugintools.log("dinozap: no playable link found");sys.exit();  # 'nolink' was undefined here; logging is an assumed stand-in
swfUrl='http://www.businessapp1.pw/jwplayer5/addplayer/jwplayer.flash.swf';app=plugintools.find_single_match(eval(p)[1].replace('\\',''),'1735\/([^"]+)'); q='%s app=%s playpath=%s flashver=WIN%5C2017,0,0,134 swfUrl=%s swfVfy=1 pageUrl=%s live=1 timeout=15';#dzap,tvdirecto
w=eval(p)[1].replace('\\','')+' app='+app+' playpath='+eval(p)[0]+' flashver=WIN%5C2017,0,0,134 swfUrl='+swfUrl+' swfVfy=1 pageUrl='+k+' live=1 timeout=15'
if w: plugintools.play_resolved_url(w);sys.exit();
    else: plugintools.log("dinozap: no playable link found");sys.exit();  # 'nolink' was undefined here; logging is an assumed stand-in
def gethttp_referer_headers(url,ref):
plugintools.log("url= "+url)
plugintools.log("ref= "+ref)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"])
request_headers.append(["Referer", ref])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("body= "+body)
return body
def gethttp_headers(url):
plugintools.log("url= "+url)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("body= "+body)
return body
| gpl-2.0 |
agentfog/qiime | tests/test_pick_rep_set.py | 15 | 20044 | #!/usr/bin/env python
"""Tests of code for representative set picking"""
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself if you make changes
__credits__ = ["Rob Knight", "Kyle Bittinger", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
from os import remove, close
from tempfile import mkstemp
from unittest import TestCase, main
from skbio.util import remove_files
from skbio.parse.sequences import parse_fasta
from skbio.alignment import SequenceCollection
from skbio.sequence import DNA
from qiime.pick_rep_set import (RepSetPicker, GenericRepSetPicker, first_id,
first, random_id, longest_id, unique_id_map, label_to_name,
make_most_abundant, parse_fasta, ReferenceRepSetPicker)
class RepSetPickerTests(TestCase):
"""Tests of the abstract RepSetPicker class"""
def test_init(self):
"""Abstract RepSetPicker __init__ should store name, params"""
p = RepSetPicker({})
self.assertEqual(p.Name, 'RepSetPicker')
self.assertEqual(p.Params, {})
def test_call(self):
"""Abstract RepSetPicker __call__ should raise NotImplementedError"""
p = RepSetPicker({})
self.assertRaises(NotImplementedError, p, '/path/to/seqs',
'/path/to/otus')
class SharedSetupTestCase(TestCase):
"""Wrapper for shared setup stuff"""
def setUp(self):
# create the temporary input files
fd, self.tmp_seq_filepath = mkstemp(prefix='GenericRepSetPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath, 'w')
seq_file.write(dna_seqs)
seq_file.close()
fd, self.tmp_otu_filepath = mkstemp(prefix='GenericRepSetPickerTest_',
suffix='.otu')
close(fd)
otu_file = open(self.tmp_otu_filepath, 'w')
otu_file.write(otus)
otu_file.close()
self.files_to_remove = [self.tmp_seq_filepath, self.tmp_otu_filepath]
self.params = {'Algorithm': 'first', 'ChoiceF': first_id}
def tearDown(self):
remove_files(self.files_to_remove)
class GenericRepSetPickerTests(SharedSetupTestCase):
""" Tests of the generic RepSet picker """
def test_call_default_params(self):
"""GenericRepSetPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp = {'0': 'R27DLI_4812',
'1': 'U1PLI_7889',
'2': 'W3Cecum_4858',
'3': 'R27DLI_3243',
}
app = GenericRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)
self.assertEqual(obs, exp)
def test_call_wrapped_function(self):
"""GenericRepSetPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp = {'0': 'R27DLI_4812',
'1': 'U1PLI_7889',
'2': 'W3Cecum_4858',
'3': 'R27DLI_3243',
}
app = GenericRepSetPicker(params={'Algorithm': 'most_abundant',
'ChoiceF': make_most_abundant, 'ChoiceFRequiresSeqs': True})
obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)
self.assertEqual(obs, exp)
def test_call_output_to_file(self):
"""GenericRepSetPicker.__call__ output to file functions as expected
"""
fd, tmp_result_filepath = mkstemp(
prefix='GenericRepSetPickerTest.test_call_output_to_file_',
suffix='.txt')
close(fd)
app = GenericRepSetPicker(params=self.params)
obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath,
result_path=tmp_result_filepath)
result_file = open(tmp_result_filepath)
result_file_str = result_file.read()
result_file.close()
# remove the result file before running the test, so in
# case it fails the temp file is still cleaned up
remove(tmp_result_filepath)
# compare data in result file to fake expected file
self.assertEqual(result_file_str, rep_seqs_result_file_exp)
# confirm that nothing is returned when result_path is specified
self.assertEqual(obs, None)
def test_call_output_to_file_sorted(self):
"""GenericRepSetPicker.__call__ output to file sorts when requested
"""
fd, tmp_result_filepath = mkstemp(
prefix='GenericRepSetPickerTest.test_call_output_to_file_',
suffix='.txt')
close(fd)
app = GenericRepSetPicker(params=self.params)
obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath,
result_path=tmp_result_filepath, sort_by='seq_id')
result_file = open(tmp_result_filepath)
result_file_str = result_file.read()
result_file.close()
# remove the result file before running the test, so in
# case it fails the temp file is still cleaned up
remove(tmp_result_filepath)
# compare data in result file to fake expected file
self.assertEqual(result_file_str, rep_seqs_result_file_sorted_exp)
# confirm that nothing is returned when result_path is specified
self.assertEqual(obs, None)
def test_call_log_file(self):
"""GenericRepSetPicker.__call__ writes log when expected
"""
fd, tmp_log_filepath = mkstemp(
prefix='GenericRepSetPickerTest.test_call_output_to_file_l_',
suffix='.txt')
close(fd)
fd, tmp_result_filepath = mkstemp(
prefix='GenericRepSetPickerTest.test_call_output_to_file_r_',
suffix='.txt')
close(fd)
app = GenericRepSetPicker(params=self.params)
obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath,
result_path=tmp_result_filepath, log_path=tmp_log_filepath)
log_file = open(tmp_log_filepath)
log_file_str = log_file.read()
log_file.close()
# remove the temp files before running the test, so in
# case it fails the temp file is still cleaned up
remove(tmp_log_filepath)
remove(tmp_result_filepath)
log_file_exp = ["GenericRepSetPicker parameters:",
'Algorithm:first',
"Application:None",
'ChoiceF:first',
'ChoiceFRequiresSeqs:False',
"Result path: %s" % tmp_result_filepath, ]
# compare data in log file to fake expected log file
for i, j in zip(log_file_str.splitlines(), log_file_exp):
if not i.startswith('ChoiceF:'): # can't test, different each time
self.assertEqual(i, j)
class ReferenceRepSetPickerTests(SharedSetupTestCase):
"""Tests of the ReferenceRepSetPickerclass """
def setUp(self):
# create the temporary input files
fd, self.tmp_seq_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath, 'w')
seq_file.write(dna_seqs)
seq_file.close()
fd, self.ref_seq_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.ref_seq_filepath, 'w')
seq_file.write(reference_seqs)
seq_file.close()
fd, self.tmp_otu_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.otu')
close(fd)
otu_file = open(self.tmp_otu_filepath, 'w')
otu_file.write(otus_w_ref)
otu_file.close()
fd, self.result_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.fasta')
close(fd)
otu_file = open(self.result_filepath, 'w')
otu_file.write(otus_w_ref)
otu_file.close()
self.files_to_remove = [self.tmp_seq_filepath,
self.tmp_otu_filepath,
self.ref_seq_filepath,
self.result_filepath]
self.params = {'Algorithm': 'first', 'ChoiceF': first_id}
def test_call_default_params(self):
"""ReferenceRepSetPicker.__call__ expected clusters default params"""
exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),
'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),
'2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),
'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),
}
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
obs = app(self.tmp_seq_filepath,
self.tmp_otu_filepath,
self.ref_seq_filepath)
self.assertEqual(obs, exp)
def test_call_write_to_file(self):
"""ReferenceRepSetPicker.__call__ otu map correctly written to file"""
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
app(self.tmp_seq_filepath,
self.tmp_otu_filepath,
self.ref_seq_filepath,
result_path=self.result_filepath)
with open(self.result_filepath) as f:
actual = SequenceCollection.from_fasta_records(parse_fasta(f), DNA)
expected = SequenceCollection.from_fasta_records(
parse_fasta(rep_seqs_reference_result_file_exp.split('\n')), DNA)
# we don't care about order in the results
self.assertEqual(set(actual), set(expected))
def test_non_ref_otus(self):
"""ReferenceRepSetPicker.__call__ same result as Generic when no ref otus
"""
exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),
'1': ('U1PLI_7889', 'TTGGACCGTG'),
'2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),
'3': ('R27DLI_3243', 'CTGGACCGTGTCT')}
fd, tmp_otu_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.otu')
close(fd)
otu_file = open(tmp_otu_filepath, 'w')
otu_file.write(otus)
otu_file.close()
self.files_to_remove.append(tmp_otu_filepath)
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
obs = app(self.tmp_seq_filepath,
tmp_otu_filepath,
self.ref_seq_filepath)
self.assertEqual(obs, exp)
def test_call_invalid_id(self):
"""ReferenceRepSetPicker.__call__ expected clusters default params"""
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
fd, tmp_otu_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.otu')
close(fd)
otu_file = open(tmp_otu_filepath, 'w')
# replace a valid sequence identifier with an invalid
# sequence identifier (i.e., one that we don't have a sequence for)
otu_file.write(otus_w_ref.replace('R27DLI_4812', 'bad_seq_identifier'))
otu_file.close()
self.files_to_remove.append(tmp_otu_filepath)
        # returning the result as a dict
self.assertRaises(KeyError,
app,
self.tmp_seq_filepath,
tmp_otu_filepath,
self.ref_seq_filepath)
# writing to file
self.assertRaises(KeyError,
app,
self.tmp_seq_filepath,
tmp_otu_filepath,
self.ref_seq_filepath,
result_path=self.result_filepath)
def test_call_ref_only(self):
"""ReferenceRepSetPicker.__call__ functions with no non-refseqs"""
fd, tmp_otu_filepath = mkstemp(
prefix='ReferenceRepSetPickerTest_',
suffix='.otu')
close(fd)
otu_file = open(tmp_otu_filepath, 'w')
otu_file.write(otus_all_ref)
otu_file.close()
self.files_to_remove.append(tmp_otu_filepath)
exp = {'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),
'ref0': ('ref0', 'CCCAAAAAAATTTTTT')}
# passing only reference (not input seqs)
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
obs = app(None,
tmp_otu_filepath,
self.ref_seq_filepath)
self.assertEqual(obs, exp)
# passing reference and input seqs
app = ReferenceRepSetPicker(params={'Algorithm': 'first',
'ChoiceF': first_id})
obs = app(self.tmp_seq_filepath,
tmp_otu_filepath,
self.ref_seq_filepath)
self.assertEqual(obs, exp)
def test_call_alt_non_ref_picker(self):
"""ReferenceRepSetPicker.__call__ handles alt non-ref picking method"""
exp = {'0': ('U1PLI_9526', 'CTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCG'
'GTCTCTCAACCCGGCTACCCATCGCGGGCTAGGTGGGCCGTT'
'ACCCCGCCTACTACCTAATGGGCCGCGACCCCATCCCTTGCCGTCTGGGC'
'TTTCCCGGGCCCCCCAGGAGGGGGGCGAGGAGTATCCGGTATTAGCCTCGGTT'
'TCCCAAGGTTGTCCCGGAGCAAGGGGCAGGTTGGTCACGTGTTACTCACCCGT'
'TCGCCACTTCATGTCCGCCCGAGGGCGGTTTCATCG'),
'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),
'2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),
'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),
}
app = ReferenceRepSetPicker(params={'Algorithm': 'longest',
'ChoiceF': longest_id})
obs = app(self.tmp_seq_filepath,
self.tmp_otu_filepath,
self.ref_seq_filepath)
self.assertEqual(obs, exp)
class TopLevelTests(SharedSetupTestCase):
"""Tests of top-level functions"""
def test_first(self):
"""first should always return first item"""
vals = [3, 4, 2]
self.assertEqual(first(vals), 3)
vals.reverse()
self.assertEqual(first(vals), 2)
def test_first_id(self):
"""first_id should return first id from list"""
        ids = "R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969".split()
self.assertEqual(first_id(ids, {}), 'R27DLI_4812')
def test_random_id(self):
"""random_id should return random id from list"""
        ids = "R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969".split()
assert random_id(ids, {}) in ids
        # just test we got something from the list; don't add a stochastic test
def test_longest_id(self):
"""longest_id should return id associated with longest seq"""
        ids = "R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969".split()
seqs = dict(parse_fasta(dna_seqs.splitlines(),
label_to_name=label_to_name))
self.assertEqual(longest_id(ids, seqs), 'U1PLI_403')
def test_unique_id_map(self):
"""unique_id_map should return map of seqs:unique representatives"""
seqs = {'a': 'AG', 'b': 'AG', 'c': 'CC', 'd': 'CT'}
obs = unique_id_map(seqs)
exp = {'c': ['c'], 'd': ['d'], 'a': ['a', 'b'], 'b': ['a', 'b']}
# can't predict if a or b
for k in obs:
assert obs[k] in exp[k]
def test_make_most_abundant(self):
"""make_most_abundant should return function with correct behavior"""
        ids = "R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969".split()
seqs = dict(parse_fasta(dna_seqs.splitlines(),
label_to_name=label_to_name))
f = make_most_abundant(seqs)
result = f(ids, seqs)
assert result in ['R27DLI_4812', 'R27DLI_727', 'U1PLI_8969']
dna_seqs = """>R27DLI_4812 FMSX0OV01EIYV5 orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGGCCGTATCTC
>R27DLI_600 FMSX0OV01D110Y orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGGCCGTATCTCA
>R27DLI_727 FMSX0OV01D5X55 orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGGCCGTATCTC
>U1PLI_403 FMSX0OV01DVG99 orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGGCCGTATCTCAGTCCCAA
>U1PLI_8969 FMSX0OV01ARWY7 orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGGCCGTATCTC
>U1PLI_9080 FMSX0OV01C9JUX orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGGCCG
>U1PLI_9526 FMSX0OV01EUN7B orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCGGTCTCTCAACCCGGCTACCCATCGCGGGCTAGGTGGGCCGTTACCCCGCCTACTACCTAATGGGCCGCGACCCCATCCCTTGCCGTCTGGGCTTTCCCGGGCCCCCCAGGAGGGGGGCGAGGAGTATCCGGTATTAGCCTCGGTTTCCCAAGGTTGTCCCGGAGCAAGGGGCAGGTTGGTCACGTGTTACTCACCCGTTCGCCACTTCATGTCCGCCCGAGGGCGGTTTCATCG
>W3Cecum_6642 FMSX0OV01CW7FI orig_bc=GATACGTCCTGA new_bc=GATACGTCCTGA bc_diffs=0
CTGGGCCGTATCTCAGT
>W3Cecum_8992 FMSX0OV01C3YXK orig_bc=GATACGTCCTGA new_bc=GATACGTCCTGA bc_diffs=0
CTGGGCCGTGTCTC
>U1PLI_7889 FMSX0OV01C6HRL orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
TTGGACCGTG
>W3Cecum_4858 FMSX0OV01BX4KM orig_bc=GATACGTCCTGA new_bc=GATACGTCCTGA bc_diffs=0
TTGGGCCGTGTCTCAGT
>R27DLI_3243 FMSX0OV01DH41R orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGACCGTGTCT
>R27DLI_4562 FMSX0OV01EJKLT orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGACCGTGTCT
>R27DLI_6828 FMSX0OV01BCWTL orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGACCGTGTCT
>R27DLI_9097 FMSX0OV01APUV6 orig_bc=CTTGATGCGTAT new_bc=CTTGATGCGTAT bc_diffs=0
CTGGACCGTGTCT
>U1PLI_2780 FMSX0OV01E2K1S orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGACCGTGTCTC
>U1PLI_67 FMSX0OV01DO1NS orig_bc=TACAGATGGCTC new_bc=TACAGATGGCTC bc_diffs=0
CTGGACCGTGT
>U9PSI_10475 FMSX0OV01BB4Q3 orig_bc=GATAGCTGTCTT new_bc=GATAGCTGTCTT bc_diffs=0
CTGGACCGTGTCTC
>U9PSI_4341 FMSX0OV01B8SXV orig_bc=GATAGCTGTCTT new_bc=GATAGCTGTCTT bc_diffs=0
CTGGACCGTGTCT
>W3Cecum_5191 FMSX0OV01BMU6R orig_bc=GATACGTCCTGA new_bc=GATACGTCCTGA bc_diffs=0
CTGGACCGTGTCT
"""
otus = """0 R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969 U1PLI_9080 U1PLI_9526 W3Cecum_6642 W3Cecum_8992
1 U1PLI_7889
2 W3Cecum_4858
3 R27DLI_3243 R27DLI_4562 R27DLI_6828 R27DLI_9097 U1PLI_2780 U1PLI_67 U9PSI_10475 U9PSI_4341 W3Cecum_5191
"""
rep_seqs_result_file_exp = """>0 R27DLI_4812
CTGGGCCGTATCTC
>1 U1PLI_7889
TTGGACCGTG
>2 W3Cecum_4858
TTGGGCCGTGTCTCAGT
>3 R27DLI_3243
CTGGACCGTGTCT
"""
rep_seqs_result_file_sorted_exp = """>3 R27DLI_3243
CTGGACCGTGTCT
>0 R27DLI_4812
CTGGGCCGTATCTC
>2 W3Cecum_4858
TTGGGCCGTGTCTCAGT
>1 U1PLI_7889
TTGGACCGTG
"""
otus_w_ref = """0 R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969 U1PLI_9080 U1PLI_9526 W3Cecum_6642 W3Cecum_8992
ref1 U1PLI_7889
2 W3Cecum_4858
ref0 R27DLI_3243 R27DLI_4562 R27DLI_6828 R27DLI_9097 U1PLI_2780 U1PLI_67 U9PSI_10475 U9PSI_4341 W3Cecum_5191
"""
otus_all_ref = """ref1 U1PLI_7889
ref0 R27DLI_3243 R27DLI_4562 R27DLI_6828 R27DLI_9097 U1PLI_2780 U1PLI_67 U9PSI_10475 U9PSI_4341 W3Cecum_5191
"""
reference_seqs = """>ref0
CCCAAAAAAATTTTTT
>ref1 some comment
GGGGGGGAAAAAAAAAAAAA
>ref2
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCAAAA
"""
rep_seqs_reference_result_file_exp = """>0 R27DLI_4812
CTGGGCCGTATCTC
>ref1 ref1
GGGGGGGAAAAAAAAAAAAA
>2 W3Cecum_4858
TTGGGCCGTGTCTCAGT
>ref0 ref0
CCCAAAAAAATTTTTT
"""
# run unit tests if run from command-line
if __name__ == '__main__':
main()
| gpl-2.0 |
TheoChevalier/bedrock | bedrock/privacy/views.py | 5 | 3689 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
from django.views.decorators.cache import cache_page
from commonware.response.decorators import xframe_allow
from bs4 import BeautifulSoup
from lib import l10n_utils
from bedrock.legal_docs.views import LegalDocView, load_legal_doc
HN_PATTERN = re.compile(r'^h(\d)$')
HREF_PATTERN = re.compile(r'^https?\:\/\/www\.mozilla\.org')
def process_legal_doc(content):
"""
Load a static Markdown file and return the document as a BeautifulSoup
object for easier manipulation.
:param content: HTML Content of the legal doc.
"""
soup = BeautifulSoup(content)
# Manipulate the markup
for section in soup.find_all('section'):
level = 0
header = soup.new_tag('header')
div = soup.new_tag('div')
section.insert(0, header)
section.insert(1, div)
# Append elements to <header> or <div>
for tag in section.children:
if not tag.name:
continue
match = HN_PATTERN.match(tag.name)
if match:
header.append(tag)
level = int(match.group(1))
if tag.name == 'p':
(header if level == 1 else div).append(tag)
if tag.name in ['ul', 'hr']:
div.append(tag)
if level > 3:
section.parent.div.append(section)
# Remove empty <div>s
if len(div.contents) == 0:
div.extract()
# Convert the site's full URLs to absolute paths
for link in soup.find_all(href=HREF_PATTERN):
link['href'] = HREF_PATTERN.sub('', link['href'])
# Return the HTML fragment as a BeautifulSoup object
return soup
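# Hedged usage sketch (hypothetical markup, for illustration only):
#
#   soup = process_legal_doc('<section><h1>Title</h1><p>Intro</p></section>')
#   # The <h1> and the <p> that follows it move into a generated <header>;
#   # later block elements land in a sibling <div>, and full
#   # www.mozilla.org URLs are rewritten as absolute paths.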
class PrivacyDocView(LegalDocView):
def get_legal_doc(self):
doc = super(PrivacyDocView, self).get_legal_doc()
doc['content'] = process_legal_doc(doc['content'])
return doc
firefox_notices = PrivacyDocView.as_view(
template_name='privacy/notices/firefox.html',
legal_doc_name='firefox_privacy_notice')
firefox_os_notices = PrivacyDocView.as_view(
template_name='privacy/notices/firefox-os.html',
legal_doc_name='firefox_os_privacy_notice')
firefox_cloud_notices = PrivacyDocView.as_view(
template_name='privacy/notices/firefox-cloud.html',
legal_doc_name='firefox_cloud_services_PrivacyNotice')
firefox_hello_notices = PrivacyDocView.as_view(
template_name='privacy/notices/firefox-hello.html',
legal_doc_name='WebRTC_PrivacyNotice')
firefox_focus_notices = PrivacyDocView.as_view(
template_name='privacy/notices/firefox-focus.html',
legal_doc_name='focus_privacy_notice')
thunderbird_notices = PrivacyDocView.as_view(
template_name='privacy/notices/thunderbird.html',
legal_doc_name='thunderbird_privacy_policy')
websites_notices = PrivacyDocView.as_view(
template_name='privacy/notices/websites.html',
legal_doc_name='websites_privacy_notice')
facebook_notices = PrivacyDocView.as_view(
template_name='privacy/notices/facebook.html',
legal_doc_name='facebook_privacy_info')
facebook_notices = xframe_allow(facebook_notices)
@cache_page(60 * 60) # cache for 1 hour
def privacy(request):
doc = load_legal_doc('mozilla_privacy_policy', l10n_utils.get_locale(request))
template_vars = {
'doc': process_legal_doc(doc['content']),
'localized': doc['localized'],
'translations': doc['translations'],
}
return l10n_utils.render(request, 'privacy/index.html', template_vars)
| mpl-2.0 |
yatinkumbhare/openstack-nova | nova/tests/unit/matchers.py | 14 | 19003 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
import StringIO
from lxml import etree
from testtools import content
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'key': self.key, 'd1_value': self.d1_value,
'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
        If you don't care about (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = sorted(d1keys - d2keys)
d2only = sorted(d2keys - d1keys)
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
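    # Illustrative behavior (a minimal sketch using this class as defined above):
    #
    #   matcher = DictMatches({'id': 'DONTCARE', 'name': 'vm1'})
    #   matcher.match({'id': 42, 'name': 'vm1'})   # None, i.e. a match
    #   matcher.match({'id': 42, 'name': 'vm2'})   # DictMismatch for 'name'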
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
return dict_list_matcher.match(self.actual_func_calls)
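    # Illustrative use (hypothetical values): record calls via .call, then
    # verify them afterwards.
    #
    #   matcher = FunctionCallMatcher([{'args': (1,), 'kwargs': {}}])
    #   matcher.call(1)
    #   matcher.match()   # None when the recorded calls match the expectation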
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % {'path': self.path}
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
}
class XMLDocInfoMismatch(XMLMismatch):
"""XML version or encoding doesn't match."""
def __init__(self, state, expected_doc_info, actual_doc_info):
super(XMLDocInfoMismatch, self).__init__(state)
self.expected_doc_info = expected_doc_info
self.actual_doc_info = actual_doc_info
def describe(self):
return ("%(path)s: XML information mismatch(version, encoding) "
"expected version %(expected_version)s, "
"expected encoding %(expected_encoding)s; "
"actual version %(actual_version)s, "
"actual encoding %(actual_encoding)s" %
{'path': self.path,
'expected_version': self.expected_doc_info['version'],
'expected_encoding': self.expected_doc_info['encoding'],
'actual_version': self.actual_doc_info['version'],
'actual_encoding': self.actual_doc_info['encoding']})
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" %
{'path': self.path, 'idx': self.idx,
'expected_tag': self.expected_tag,
'actual_tag': self.actual_tag})
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" %
{'path': self.path, 'expected_only': self.expected_only,
'actual_only': self.actual_only})
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" %
{'path': self.path, 'key': self.key,
'expected_value': self.expected_value,
'actual_value': self.actual_value})
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" %
{'path': self.path, 'expected_text': self.expected_text,
'actual_text': self.actual_text})
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLMatchState(object):
"""Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
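    # Illustrative path tracking (a sketch; tag names are hypothetical):
    #
    #   state = XMLMatchState('<a/>', '<a/>')
    #   with state.node('server', 0):
    #       with state.node('name', None):
    #           str(state)   # '/server[0]/name'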
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction)
def __init__(self, expected, allow_mixed_nodes=False,
skip_empty_text_nodes=True, skip_values=('DONTCARE',)):
self.expected_xml = expected
self.expected = etree.parse(StringIO.StringIO(expected))
self.allow_mixed_nodes = allow_mixed_nodes
self.skip_empty_text_nodes = skip_empty_text_nodes
self.skip_values = set(skip_values)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.parse(StringIO.StringIO(actual_xml))
state = XMLMatchState(self.expected_xml, actual_xml)
expected_doc_info = self._get_xml_docinfo(self.expected)
actual_doc_info = self._get_xml_docinfo(actual)
if expected_doc_info != actual_doc_info:
return XMLDocInfoMismatch(state, expected_doc_info,
actual_doc_info)
result = self._compare_node(self.expected.getroot(),
actual.getroot(), state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
@staticmethod
def _get_xml_docinfo(xml_document):
return {'version': xml_document.docinfo.xml_version,
'encoding': xml_document.docinfo.encoding}
def _compare_text_nodes(self, expected, actual, state):
expected_text = [expected.text]
expected_text.extend(child.tail for child in expected)
actual_text = [actual.text]
actual_text.extend(child.tail for child in actual)
if self.skip_empty_text_nodes:
expected_text = [text for text in expected_text
if text and not text.isspace()]
actual_text = [text for text in actual_text
if text and not text.isspace()]
if self.skip_values.intersection(
expected_text + actual_text):
return
if self.allow_mixed_nodes:
            # sort the text nodes, since mixed content can appear in any order
expected_text = sorted(expected_text)
actual_text = sorted(actual_text)
if expected_text != actual_text:
return XMLTextValueMismatch(state, expected_text, actual_text)
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if self.skip_values.intersection(
[expected_value, actual_value]):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare text nodes
text_nodes_mismatch = self._compare_text_nodes(
expected, actual, state)
if text_nodes_mismatch:
return text_nodes_mismatch
# Compare the contents of the node
matched_actual_child_idxs = set()
            # first_actual_child_idx points to the next unvisited actual
            # child; used ONLY when allow_mixed_nodes=False, to avoid
            # visiting actual child nodes twice
first_actual_child_idx = 0
for expected_child in expected:
if expected_child.tag in self.SKIP_TAGS:
continue
related_actual_child_idx = None
if self.allow_mixed_nodes:
first_actual_child_idx = 0
for actual_child_idx in range(
first_actual_child_idx, len(actual)):
if actual[actual_child_idx].tag in self.SKIP_TAGS:
first_actual_child_idx += 1
continue
if actual_child_idx in matched_actual_child_idxs:
continue
# Compare the nodes
result = self._compare_node(expected_child,
actual[actual_child_idx],
state, actual_child_idx)
first_actual_child_idx += 1
if result is not True:
if self.allow_mixed_nodes:
continue
else:
return result
else: # nodes match
related_actual_child_idx = actual_child_idx
break
if related_actual_child_idx is not None:
matched_actual_child_idxs.add(actual_child_idx)
else:
return XMLExpectedChild(state, expected_child.tag,
actual_child_idx + 1)
# Make sure we consumed all nodes in actual
for actual_child_idx, actual_child in enumerate(actual):
if (actual_child.tag not in self.SKIP_TAGS and
actual_child_idx not in matched_actual_child_idxs):
return XMLUnexpectedChild(state, actual_child.tag,
actual_child_idx)
# The nodes match
return True
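# Hedged usage sketch (not part of the original test helpers; element names
# are hypothetical):
#
#   matcher = XMLMatches('<host><name>DONTCARE</name></host>')
#   matcher.match('<host><name>compute1</name></host>')   # None: documents match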
| apache-2.0 |
snamstorm/rockstor-core | src/rockstor/smart_manager/views/samba_service.py | 2 | 3041 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import shutil
from rest_framework.response import Response
from storageadmin.util import handle_exception
from system.services import systemctl
from system.samba import (update_global_config, restart_samba)
from django.db import transaction
from django.conf import settings
from base_service import BaseServiceDetailView
from smart_manager.models import Service
from storageadmin.models import SambaShare
from system.osi import md5sum
import logging
logger = logging.getLogger(__name__)
class SambaServiceView(BaseServiceDetailView):
@transaction.commit_on_success
def post(self, request, command):
"""
execute a command on the service
"""
service_name = 'smb'
service = Service.objects.get(name=service_name)
if (command == 'config'):
            # Nothing else to configure at the moment; just save the model.
try:
config = request.data.get('config', {'workgroup': 'MYGROUP',})
workgroup = config['workgroup']
self._save_config(service, config)
update_global_config(workgroup)
restart_samba(hard=True)
except Exception, e:
e_msg = ('Samba could not be configured. Try again. '
'Exception: %s' % e.__str__())
handle_exception(Exception(e_msg), request)
else:
try:
if (command == 'stop'):
systemctl('smb', 'disable')
systemctl('nmb', 'disable')
else:
systemd_name = '%s.service' % service_name
ss_dest = ('/etc/systemd/system/%s' % systemd_name)
ss_src = ('%s/%s' % (settings.CONFROOT, systemd_name))
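                    # Refresh the bundled systemd unit file only when its
                    # checksum differs from the installed copy.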
sum1 = md5sum(ss_dest)
sum2 = md5sum(ss_src)
if (sum1 != sum2):
shutil.copy(ss_src, ss_dest)
systemctl('smb', 'enable')
systemctl('nmb', 'enable')
systemctl('smb', command)
systemctl('nmb', command)
except Exception, e:
e_msg = ('Failed to %s samba due to a system error: %s' % (command, e.__str__()))
handle_exception(Exception(e_msg), request)
return Response()
| gpl-3.0 |
HWal/paparazzi | sw/supervision/python/lib/environment.py | 33 | 2810 | # Paparazzi center utilities
#
# Copyright (C) 2016 ENAC, Florian BITARD (intern student)
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
# [Imports]
import os
from sys import platform as os_name
import logging
###############################################################################
# [Constants]
LOGGER = logging.getLogger("[ENV]")
OS = os_name
SRC_NAME = "PAPARAZZI_SRC"
HERE_TO_SRC = "../../../.."
CONF_NAME = "PAPARAZZI_CONF"
HOME_TO_CONF = "conf"
HOME_NAME = "PAPARAZZI_HOME"
HOME_TO_VAR = "var"
IVY_BUS_NAME = "IVY_BUS"
def get_src_dir(src_name):
current_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.normpath(os.path.join(current_dir, HERE_TO_SRC))
return os.getenv(src_name, src_dir)
PAPARAZZI_SRC = get_src_dir(SRC_NAME)
LOGGER.debug("%s=%s", SRC_NAME, PAPARAZZI_SRC)
def get_home_dir(home_name):
return os.getenv(home_name, get_src_dir(SRC_NAME))
PAPARAZZI_HOME = get_home_dir(HOME_NAME)
LOGGER.info("%s=%s", HOME_NAME, PAPARAZZI_HOME)
def get_conf_dir(conf_name):
conf_dir = os.path.join(PAPARAZZI_HOME, HOME_TO_CONF)
if os.path.exists(conf_dir):
return os.getenv(conf_name, conf_dir)
else:
LOGGER.error("'%s' directory doesn't exist !", conf_dir)
PAPARAZZI_CONF = get_conf_dir(CONF_NAME)
def get_ivy_bus(ivy_bus_name):
supposed_ivy_bus = os.getenv(ivy_bus_name)
if supposed_ivy_bus is not None:
return supposed_ivy_bus
    elif OS in ('linux', 'linux2'):
        return "127.255.255.255:2010"
elif OS == 'darwin':
return "224.5.6.7:8910"
LOGGER.error("Unknown Ivy bus for the current OS !")
IVY_BUS = get_ivy_bus(IVY_BUS_NAME)
LOGGER.debug("%s=%s", HOME_NAME, PAPARAZZI_HOME)
RUN_VERSION_EXE_NAME = "paparazzi_version"
RUN_VERSION_EXE = os.path.join(PAPARAZZI_HOME, RUN_VERSION_EXE_NAME)
BUILD_VERSION_FILE_NAME = "build_version.txt"
BUILD_VERSION_FILE = os.path.join(PAPARAZZI_HOME, HOME_TO_VAR,
BUILD_VERSION_FILE_NAME)
| gpl-2.0 |
0Chencc/CTFCrackTools | Lib/test/test_linecache.py | 96 | 4079 | """ Tests for the linecache module """
import linecache
import unittest
import os.path
from test import test_support as support
FILENAME = linecache.__file__
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(support.__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
def test_getline(self):
getline = linecache.getline
# Bad values for line number should return an empty string
self.assertEqual(getline(FILENAME, 2**15), EMPTY)
self.assertEqual(getline(FILENAME, -1), EMPTY)
        # Float values currently raise TypeError; should they?
self.assertRaises(TypeError, getline, FILENAME, 1.1)
# Bad filenames should return an empty string
self.assertEqual(getline(EMPTY, 1), EMPTY)
self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
# Check whether lines correspond to those from file iteration
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
for index, line in enumerate(open(filename)):
self.assertEqual(line, getline(filename, index + 1))
# Check module loading
for entry in MODULES:
filename = os.path.join(MODULE_PATH, entry) + '.py'
for index, line in enumerate(open(filename)):
self.assertEqual(line, getline(filename, index + 1))
# Check that bogus data isn't returned (issue #1309567)
empty = linecache.getlines('a/b/c/__init__.py')
self.assertEqual(empty, [])
def test_no_ending_newline(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as fp:
fp.write(SOURCE_3)
lines = linecache.getlines(support.TESTFN)
self.assertEqual(lines, ["\n", "def f():\n", " return 3\n"])
def test_clearcache(self):
cached = []
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
cached.append(filename)
linecache.getline(filename, 1)
# Are all files cached?
cached_empty = [fn for fn in cached if fn not in linecache.cache]
self.assertEqual(cached_empty, [])
# Can we clear the cache?
linecache.clearcache()
cached_empty = [fn for fn in cached if fn in linecache.cache]
self.assertEqual(cached_empty, [])
def test_checkcache(self):
getline = linecache.getline
# Create a source file and cache its contents
source_name = support.TESTFN + '.py'
self.addCleanup(support.unlink, source_name)
with open(source_name, 'w') as source:
source.write(SOURCE_1)
getline(source_name, 1)
# Keep a copy of the old contents
source_list = []
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
with open(source_name, 'w') as source:
source.write(SOURCE_2)
# Try to update a bogus cache entry
linecache.checkcache('dummy')
# Check that the cache matches the old contents
for index, line in enumerate(source_list):
self.assertEqual(line, getline(source_name, index + 1))
# Update the cache and check whether it matches the new source file
linecache.checkcache(source_name)
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
def test_main():
support.run_unittest(LineCacheTests)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
ulule/django-linguist | linguist/mixins.py | 1 | 14692 | # -*- coding: utf-8 -*-
import copy
from contextlib import contextmanager
import django
from django.db.models import Q
from django.db import models
from django.utils.functional import cached_property
from . import utils
from .cache import CachedTranslation
from .helpers import prefetch_translations
if django.VERSION >= (1, 11):
from django.db.models.query import ModelIterable as BaseModelIterable
class ModelIterable(BaseModelIterable):
model = None
def __iter__(self):
for obj in super(ModelIterable, self).__iter__():
if obj and not isinstance(obj, self.queryset.model):
yield obj
continue
utils.set_object_translations_cache(obj, self.queryset)
yield obj
class QuerySetMixin(object):
"""
Linguist QuerySet Mixin.
"""
def __init__(self, *args, **kwargs):
self.init(*args, **kwargs)
super(QuerySetMixin, self).__init__(*args, **kwargs)
if django.VERSION >= (1, 11):
self._iterable_class = ModelIterable
def init(self, *args, **kwargs):
self._prefetched_translations_cache = kwargs.pop(
"_prefetched_translations_cache", []
)
self._prefetch_translations_done = kwargs.pop(
"_prefetch_translations_done", False
)
def _filter_or_exclude(self, negate, args, kwargs):
"""
Overrides default behavior to handle linguist fields.
"""
from .models import Translation
new_args = self.get_cleaned_args(args)
new_kwargs = self.get_cleaned_kwargs(kwargs)
translation_args = self.get_translation_args(args)
translation_kwargs = self.get_translation_kwargs(kwargs)
has_linguist_args = self.has_linguist_args(args)
has_linguist_kwargs = self.has_linguist_kwargs(kwargs)
if translation_args or translation_kwargs:
ids = list(
set(
Translation.objects.filter(
*translation_args, **translation_kwargs
).values_list("object_id", flat=True)
)
)
if ids:
new_kwargs["id__in"] = ids
has_kwargs = has_linguist_kwargs and not (new_kwargs or new_args)
has_args = has_linguist_args and not (new_args or new_kwargs)
# No translations but we looked for translations?
# Returns empty queryset.
if has_kwargs or has_args:
return self._clone().none()
return super(QuerySetMixin, self)._filter_or_exclude(
negate, new_args, new_kwargs
)
def _clone(self, klass=None, setup=False, **kwargs):
if django.VERSION < (1, 9):
kwargs.update({"klass": klass, "setup": setup})
qs = super(QuerySetMixin, self)._clone(**kwargs)
qs._prefetched_translations_cache = self._prefetched_translations_cache
qs._prefetch_translations_done = self._prefetch_translations_done
return qs
def iterator(self):
for obj in super(QuerySetMixin, self).iterator():
if obj and not isinstance(obj, self.model):
yield obj
continue
utils.set_object_translations_cache(obj, self)
yield obj
@cached_property
def concrete_field_names(self):
"""
Returns model concrete field names.
"""
return [f[0].name for f in self._get_concrete_fields_with_model()]
def _get_concrete_fields_with_model(self):
"""For compatibility with Django<=1.10. Replace old
`_meta.get_concrete_fields_with_model`.
https://docs.djangoproject.com/en/1.10/ref/models/meta/
"""
return [
(f, f.model if f.model != self.model else None)
for f in self.model._meta.get_fields()
if f.concrete
and (
not f.is_relation or f.one_to_one or (f.many_to_one and f.related_model)
)
]
@cached_property
def linguist_field_names(self):
"""
Returns linguist field names (example: "title" and "title_fr").
"""
return list(self.model._linguist.fields) + list(
utils.get_language_fields(self.model._linguist.fields)
)
def has_linguist_kwargs(self, kwargs):
"""
Parses the given kwargs and returns True if they contain
linguist lookups.
"""
for k in kwargs:
if self.is_linguist_lookup(k):
return True
return False
def has_linguist_args(self, args):
"""
Parses the given args and returns True if they contain
linguist lookups.
"""
linguist_args = []
for arg in args:
condition = self._get_linguist_condition(arg)
if condition:
linguist_args.append(condition)
return bool(linguist_args)
def get_translation_args(self, args):
"""
Returns linguist args from model args.
"""
translation_args = []
for arg in args:
condition = self._get_linguist_condition(arg, transform=True)
if condition:
translation_args.append(condition)
return translation_args
def get_translation_kwargs(self, kwargs):
"""
Returns linguist lookup kwargs (related to Translation model).
"""
lks = []
for k, v in kwargs.items():
if self.is_linguist_lookup(k):
lks.append(
utils.get_translation_lookup(self.model._linguist.identifier, k, v)
)
translation_kwargs = {}
for lk in lks:
for k, v in lk.items():
if k not in translation_kwargs:
translation_kwargs[k] = v
return translation_kwargs
def is_linguist_lookup(self, lookup):
"""
Returns true if the given lookup is a valid linguist lookup.
"""
field = utils.get_field_name_from_lookup(lookup)
        # Fields that are neither concrete nor linguist fields fall through,
        # keeping Django's default "FieldError: Cannot resolve keyword".
if (
field not in self.concrete_field_names
and field in self.linguist_field_names
):
return True
return False
def _get_linguist_condition(self, condition, reverse=False, transform=False):
"""
        Parses a Q tree and returns the linguist lookups (or the model
        lookups, when ``reverse`` is True).
"""
# We deal with a node
if isinstance(condition, Q):
children = []
for child in condition.children:
parsed = self._get_linguist_condition(
condition=child, reverse=reverse, transform=transform
)
if parsed is not None:
if (isinstance(parsed, Q) and parsed.children) or isinstance(
parsed, tuple
):
children.append(parsed)
new_condition = copy.deepcopy(condition)
new_condition.children = children
return new_condition
# We are dealing with a lookup ('field', 'value').
lookup, value = condition
is_linguist = self.is_linguist_lookup(lookup)
if transform and is_linguist:
return Q(
**utils.get_translation_lookup(
self.model._linguist.identifier, lookup, value
)
)
if (reverse and not is_linguist) or (not reverse and is_linguist):
return condition
def get_cleaned_args(self, args):
"""
Returns positional arguments for related model query.
"""
if not args:
return args
cleaned_args = []
for arg in args:
condition = self._get_linguist_condition(arg, True)
if condition:
cleaned_args.append(condition)
return cleaned_args
def get_cleaned_kwargs(self, kwargs):
"""
Returns concrete field lookups.
"""
cleaned_kwargs = kwargs.copy()
if kwargs is not None:
for k in kwargs:
if self.is_linguist_lookup(k):
del cleaned_kwargs[k]
return cleaned_kwargs
def with_translations(self, **kwargs):
"""
Prefetches translations.
        Takes four optional keyword arguments:
        * ``field_names``: ``field_name`` values for SELECT IN
        * ``languages``: ``language`` values for SELECT IN
        * ``chunks_length``: fetches IDs by chunk
        * ``force``: prefetch again even if translations were already fetched
"""
force = kwargs.pop("force", False)
if self._prefetch_translations_done and force is False:
return self
self._prefetched_translations_cache = utils.get_grouped_translations(
self, **kwargs
)
self._prefetch_translations_done = True
return self._clone()
def activate_language(self, language):
"""
Activates the given ``language`` for the QuerySet instances.
"""
utils.activate_language(self, language)
return self
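# Hedged usage sketch (assumes a model using ManagerMixin with a translatable
# "title" field; "Article" and the lookup values are illustrative):
#
#   Article.objects.filter(title_fr__icontains='bonjour')
#   # "title_fr" is resolved against the Translation model, while
#   # concrete-field lookups pass through unchanged.
#   Article.objects.with_translations(languages=['en', 'fr'])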
class LinguistQuerySet(QuerySetMixin, models.query.QuerySet):
pass
class ManagerMixin(object):
"""
Linguist Manager Mixin.
"""
def get_queryset(self):
return LinguistQuerySet(self.model)
def with_translations(self, **kwargs):
"""
Proxy for ``QuerySetMixin.with_translations()`` method.
"""
return self.get_queryset().with_translations(**kwargs)
def activate_language(self, language):
"""
Proxy for ``QuerySetMixin.activate_language()`` method.
"""
self.get_queryset().activate_language(language)
class ModelMixin(object):
def prefetch_translations(self, *args, **kwargs):
if not self.pk:
return
prefetch_translations([self], **kwargs)
if args:
fields = [
f for f in self._meta.get_fields(include_hidden=True) if f.name in args
]
for field in fields:
value = getattr(self, field.name, None)
if issubclass(value.__class__, ModelMixin):
value.prefetch_translations()
def populate_missing_translations(self):
for field in self._linguist.fields:
if field in self._linguist.translations:
languages = self._linguist.translations[field]
missing_languages = list(
set(self._linguist.supported_languages) - set(languages.keys())
)
for language in missing_languages:
self._linguist.translations[field][language] = CachedTranslation()
else:
self._linguist.translations[field] = {}
for language in self._linguist.supported_languages:
self._linguist.translations[field][language] = CachedTranslation()
@property
def linguist_identifier(self):
"""
Returns Linguist's identifier for this model.
"""
return self._linguist.identifier
@property
def active_language(self):
return self._linguist.language
@property
def translatable_fields(self):
"""
Returns Linguist's translation class fields (translatable fields).
"""
return self._linguist.fields
@property
def available_languages(self):
"""
Returns available languages.
"""
from .models import Translation
return (
Translation.objects.filter(
identifier=self.linguist_identifier, object_id=self.pk
)
.values_list("language", flat=True)
.distinct()
.order_by("language")
)
@property
def cached_translations_count(self):
"""
Returns cached translations count.
"""
return self._linguist.translations_count
def clear_translations_cache(self):
"""
Clears Linguist cache.
"""
self._linguist.translations.clear()
def get_translations(self, language=None):
"""
Returns available (saved) translations for this instance.
"""
from .models import Translation
if not self.pk:
return Translation.objects.none()
return Translation.objects.get_translations(obj=self, language=language)
def delete_translations(self, language=None):
"""
Deletes related translations.
"""
from .models import Translation
return Translation.objects.delete_translations(obj=self, language=language)
def activate_language(self, language):
"""
Context manager to override the instance language.
"""
self._linguist.language = language
@contextmanager
def override_language(self, language):
"""
Context manager to override the instance language.
"""
previous_language = self._linguist.language
self._linguist.language = language
yield
self._linguist.language = previous_language
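    # Illustrative (the instance and value are hypothetical):
    #
    #   with article.override_language('fr'):
    #       article.title = 'Bonjour'   # reads/writes use the 'fr' cache
    #   # the previously active language is restored on exit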
def _save_table(
self,
raw=False,
cls=None,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
"""
        Overrides the model's ``_save_table`` method to save translations after
        the instance has been saved (required to retrieve the object ID for the
        ``Translation`` model).
        Preferred over overriding the object's ``save`` method to ensure that
        ``pre_save`` and ``post_save`` signals happen respectively before and
        after the translations have been saved to the database.
        Thus ``pre_save`` signals have access to the ``has_changed`` attribute
        on translated fields before the translations are saved and the
        attribute is reset, and ``post_save`` signals always have access to
        the updated translations.
"""
updated = super(ModelMixin, self)._save_table(
raw=raw,
cls=cls,
force_insert=force_insert,
force_update=force_update,
using=using,
update_fields=update_fields,
)
self._linguist.decider.objects.save_translations([self])
return updated
def get_field_object(self, field_name, language):
return self.__class__.__dict__[
utils.build_localized_field_name(field_name, language)
].field
| mit |
LoHChina/nova | nova/tests/unit/virt/xenapi/image/test_bittorrent.py | 51 | 4959 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox3 import mox
import six
from nova import context
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import bittorrent
from nova.virt.xenapi import vm_utils
class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestBittorrentStore, self).setUp()
self.store = bittorrent.BittorrentStore()
self.mox = mox.Mox()
self.flags(torrent_base_url='http://foo',
connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
def test_download_image(self):
instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
params = {'image_id': 'fake_image_uuid',
'sr_path': '/fake/sr/path',
'torrent_download_stall_cutoff': 600,
'torrent_listen_port_end': 6891,
'torrent_listen_port_start': 6881,
'torrent_max_last_accessed': 86400,
'torrent_max_seeder_processes_per_host': 1,
'torrent_seed_chance': 1.0,
'torrent_seed_duration': 3600,
'torrent_url': 'http://foo/fake_image_uuid.torrent',
'uuid_stack': ['uuid1']}
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized(
'bittorrent', 'download_vhd', **params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
instance, 'fake_image_uuid')
self.mox.VerifyAll()
def test_upload_image(self):
self.assertRaises(NotImplementedError, self.store.upload_image,
self.context, self.session, mox.IgnoreArg, 'fake_image_uuid',
['fake_vdi_uuid'])
class LookupTorrentURLTestCase(test.NoDBTestCase):
def setUp(self):
super(LookupTorrentURLTestCase, self).setUp()
self.store = bittorrent.BittorrentStore()
self.image_id = 'fakeimageid'
def test_default_fetch_url_no_base_url_set(self):
self.flags(torrent_base_url=None,
group='xenserver')
exc = self.assertRaises(
RuntimeError, self.store._lookup_torrent_url_fn)
self.assertEqual('Cannot create default bittorrent URL without'
' xenserver.torrent_base_url configuration option'
' set.',
six.text_type(exc))
def test_default_fetch_url_base_url_is_set(self):
self.flags(torrent_base_url='http://foo',
group='xenserver')
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual('http://foo/fakeimageid.torrent',
lookup_fn(self.image_id))
def test_invalid_base_url_warning_logged(self):
self.flags(torrent_base_url='www.foo.com',
group='xenserver')
# Make sure a warning is logged when an invalid base URL is set,
# where invalid means it does not contain any slash characters
warnings = []
def fake_warn(msg):
warnings.append(msg)
self.stubs.Set(bittorrent.LOG, 'warn', fake_warn)
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual('fakeimageid.torrent',
lookup_fn(self.image_id))
self.assertTrue(any('does not contain a slash character' in msg for
msg in warnings),
'_lookup_torrent_url_fn() did not log a warning '
'message when the torrent_base_url did not contain a '
'slash character.')
| apache-2.0 |
PRIMEDesigner15/PRIMEDesigner15 | Test_files/dependencies/Lib/site-packages/pygame/pkgdata.py | 603 | 2146 | """pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
        return open(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as BytesIO).
"""
__all__ = ['getResource']
import sys
import os
#from cStringIO import StringIO
from io import BytesIO
try:
# Try to use setuptools if available.
from pkg_resources import resource_stream
_have_resource_stream = True
except ImportError:
_have_resource_stream = False
def getResource(identifier, pkgname=__name__):
"""Acquire a readable object for a given package name and identifier.
An IOError will be raised if the resource can not be found.
For example::
mydata = getResource('mypkgdata.jpg').read()
Note that the package name must be fully qualified, if given, such
that it would be found in sys.modules.
In some cases, getResource will return a real file object. In that
case, it may be useful to use its name attribute to get the path
rather than use it as a file-like object. For example, you may
be handing data off to a C API.
"""
# Prefer setuptools
if _have_resource_stream:
return resource_stream(pkgname, identifier)
mod = sys.modules[pkgname]
fn = getattr(mod, '__file__', None)
if fn is None:
        raise IOError("%r has no __file__!" % pkgname)
path = os.path.join(os.path.dirname(fn), identifier)
loader = getattr(mod, '__loader__', None)
if loader is not None:
try:
data = loader.get_data(path)
except IOError:
pass
else:
            return BytesIO(data)  # get_data() returns bytes; wrap in a binary buffer
#return file(os.path.normpath(path), 'rb')
return open(os.path.normpath(path), 'rb')
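# Hedged example (the package and file names are hypothetical):
#
#   icon_bytes = getResource('data/icon.png', pkgname='mygame').read()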
| bsd-3-clause |
julian-seward1/servo | tests/wpt/web-platform-tests/css/tools/w3ctestlib/Indexer.py | 26 | 9812 | #!/usr/bin/python
# CSS Test Suite Manipulation Library
# Initial code by fantasai, joint copyright 2010 W3C and Microsoft
# Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license>
# Define contains vmethod for Template Toolkit
from template.stash import list_op
@list_op("contains")
def list_contains(l, x):
return x in l
import sys
import re
import os
import codecs
from os.path import join, exists, abspath
from template import Template
import w3ctestlib
from Utils import listfiles, escapeToNamedASCII
from OutputFormats import ExtensionMap
import shutil
class Section:
def __init__(self, uri, title, numstr):
self.uri = uri
self.title = title
self.numstr = numstr
self.tests = []
def __cmp__(self, other):
return cmp(self.natsortkey(), other.natsortkey())
def chapterNum(self):
return self.numstr.partition('.')[0]
def natsortkey(self):
chunks = self.numstr.partition('.#')[0].split('.')
for index in range(len(chunks)):
if chunks[index].isdigit():
# wrap in tuple with '0' to explicitly specify numbers come first
chunks[index] = (0, int(chunks[index]))
else:
chunks[index] = (1, chunks[index])
return (chunks, self.numstr)
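  # Illustrative: natsortkey gives numeric-aware ordering, so a section
  # numbered '2.10' sorts after '2.9', since
  # '2.10' -> ([(0, 2), (0, 10)], '2.10') and (0, 9) < (0, 10).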
class Indexer:
def __init__(self, suite, sections, suites, flags, splitChapter=False, templatePathList=None,
extraData=None, overviewTmplNames=None, overviewCopyExts=('.css', 'htaccess')):
"""Initialize indexer with TestSuite `suite` toc data file
`tocDataPath` and additional template paths in list `templatePathList`.
The toc data file should be list of tab-separated records, one
per line, of each spec section's uri, number/letter, and title.
`splitChapter` selects a single page index if False, chapter
indicies if True.
`extraData` can be a dictionary whose data gets passed to the templates.
`overviewCopyExts` lists file extensions that should be found
and copied from the template path into the main build directory.
The default value is ['.css', 'htaccess'].
       `overviewTmplNames` lists template names that should be
processed from the template path into the main build directory.
The '.tmpl' extension, if any, is stripped from the output filename.
The default value is ['index.htm.tmpl', 'index.xht.tmpl', 'testinfo.data.tmpl']
"""
self.suite = suite
self.splitChapter = splitChapter
self.extraData = extraData
self.overviewCopyExtPat = re.compile('.*(%s)$' % '|'.join(overviewCopyExts))
self.overviewTmplNames = overviewTmplNames if overviewTmplNames is not None \
else ['index.htm.tmpl', 'index.xht.tmpl', 'testinfo.data.tmpl',
'implementation-report-TEMPLATE.data.tmpl']
# Initialize template engine
self.templatePath = [join(w3ctestlib.__path__[0], 'templates')]
if templatePathList:
self.templatePath.extend(templatePathList)
self.templatePath = [abspath(path) for path in self.templatePath]
self.tt = Template({
'INCLUDE_PATH': self.templatePath,
'ENCODING' : 'utf-8',
'PRE_CHOMP' : 1,
'POST_CHOMP' : 0,
})
# Load toc data
self.sections = {}
for uri, numstr, title in sections:
uri = intern(uri.encode('ascii'))
uriKey = intern(self._normalizeScheme(uri))
numstr = escapeToNamedASCII(numstr)
title = escapeToNamedASCII(title) if title else None
self.sections[uriKey] = Section(uri, title, numstr)
self.suites = suites
self.flags = flags
# Initialize storage
self.errors = {}
self.contributors = {}
self.alltests = []
def _normalizeScheme(self, uri):
if (uri and uri.startswith('http:')):
return 'https:' + uri[5:]
return uri
def indexGroup(self, group):
for test in group.iterTests():
data = test.getMetadata()
if data: # Shallow copy for template output
data = dict(data)
data['file'] = '/'.join((group.name, test.relpath)) \
if group.name else test.relpath
if (data['scripttest']):
data['flags'].append(intern('script'))
self.alltests.append(data)
for uri in data['links']:
uri = self._normalizeScheme(uri)
uri = uri.replace(self._normalizeScheme(self.suite.draftroot), self._normalizeScheme(self.suite.specroot))
if self.sections.has_key(uri):
testlist = self.sections[uri].tests.append(data)
for credit in data['credits']:
self.contributors[credit[0]] = credit[1]
else:
self.errors[test.sourcepath] = test.errors
def __writeTemplate(self, template, data, outfile):
o = self.tt.process(template, data)
f = open(outfile, 'w')
f.write(o.encode('utf-8'))
f.close()
def writeOverview(self, destDir, errorOut=sys.stderr, addTests=[]):
"""Write format-agnostic pages such as test suite overview pages,
test data files, and error reports.
Indexed errors are reported to errorOut, which must be either
an output handle such as sys.stderr, a tuple of
(template filename string, output filename string)
or None to suppress error output.
`addTests` is a list of additional test paths, relative to the
overview root; it is intended for indexing raw tests
"""
# Set common values
data = self.extraData.copy()
data['suitetitle'] = self.suite.title
data['suite'] = self.suite.name
data['specroot'] = self.suite.specroot
data['draftroot'] = self.suite.draftroot
data['contributors'] = self.contributors
data['tests'] = self.alltests
data['extmap'] = ExtensionMap({'.xht':'', '.html':'', '.htm':'', '.svg':''})
data['formats'] = self.suite.formats
data['addtests'] = addTests
data['suites'] = self.suites
data['flagInfo'] = self.flags
data['formatInfo'] = { 'html4': { 'report': True, 'path': 'html4', 'ext': 'htm', 'filter': 'nonHTML'},
'html5': { 'report': True, 'path': 'html', 'ext': 'htm', 'filter': 'nonHTML' },
'xhtml1': { 'report': True, 'path': 'xhtml1', 'ext': 'xht', 'filter': 'HTMLonly' },
'xhtml1print': { 'report': False, 'path': 'xhtml1print', 'ext': 'xht', 'filter': 'HTMLonly' },
'svg': { 'report': True, 'path': 'svg', 'ext': 'svg', 'filter': 'HTMLonly' }
}
# Copy simple copy files
for tmplDir in reversed(self.templatePath):
files = listfiles(tmplDir)
for file in files:
if self.overviewCopyExtPat.match(file):
shutil.copy(join(tmplDir, file), join(destDir, file))
# Generate indexes
for tmpl in self.overviewTmplNames:
out = tmpl[0:-5] if tmpl.endswith('.tmpl') else tmpl
self.__writeTemplate(tmpl, data, join(destDir, out))
# Report errors
if (self.errors):
if isinstance(errorOut, tuple):
data['errors'] = self.errors
self.__writeTemplate(errorOut[0], data, join(destDir, errorOut[1]))
else:
sys.stdout.flush()
for errorLocation in self.errors:
print >> errorOut, "Error in %s: %s" % \
(errorLocation, ' '.join([str(error) for error in self.errors[errorLocation]]))
def writeIndex(self, format):
"""Write indices into test suite build output through format `format`.
"""
# Set common values
data = self.extraData.copy()
data['suitetitle'] = self.suite.title
data['suite'] = self.suite.name
data['specroot'] = self.suite.specroot
data['draftroot'] = self.suite.draftroot
data['indexext'] = format.indexExt
data['isXML'] = format.indexExt.startswith('.x')
data['formatdir'] = format.formatDirName
data['extmap'] = format.extMap
data['tests'] = self.alltests
data['suites'] = self.suites
data['flagInfo'] = self.flags
# Generate indices:
# Reftest indices
self.__writeTemplate('reftest-toc.tmpl', data,
format.dest('reftest-toc%s' % format.indexExt))
self.__writeTemplate('reftest.tmpl', data,
format.dest('reftest.list'))
# Table of Contents
sectionlist = sorted(self.sections.values())
if self.splitChapter:
# Split sectionlist into chapters
chapters = []
lastChapNum = '$' # some nonmatching initial char
chap = None
for section in sectionlist:
if (section.title and (section.chapterNum() != lastChapNum)):
lastChapNum = section.chapterNum()
chap = section
chap.sections = []
chap.testcount = 0
chap.testnames = set()
chapters.append(chap)
# accumulate unique test names so the chapter count ignores duplicates
chap.testnames.update([test['name'] for test in section.tests])
chap.testcount = len(chap.testnames)
chap.sections.append(section)
# Generate main toc
data['chapters'] = chapters
self.__writeTemplate('chapter-toc.tmpl', data,
format.dest('toc%s' % format.indexExt))
del data['chapters']
# Generate chapter tocs
for chap in chapters:
data['chaptertitle'] = chap.title
data['testcount'] = chap.testcount
data['sections'] = chap.sections
self.__writeTemplate('test-toc.tmpl', data, format.dest('chapter-%s%s' \
% (chap.numstr, format.indexExt)))
else: # not splitChapter
data['chapters'] = sectionlist
self.__writeTemplate('test-toc.tmpl', data,
format.dest('toc%s' % format.indexExt))
del data['chapters']
| mpl-2.0 |
karwa/swift | utils/split_file.py | 65 | 1410 | #!/usr/bin/env python
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import os
import re
import sys
parser = argparse.ArgumentParser(
description="""
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
""")
parser.add_argument(
"-o", dest="out_dir", default=".", metavar="<dir>",
help="directory path where the output files are placed in. "
"(defaults to current directory)")
parser.add_argument(
"input", type=argparse.FileType("r"), nargs="?", default=sys.stdin,
metavar="<path>",
help="input file. (defaults to stdin)")
args = parser.parse_args()
fp_out = None
for line in args.input:
m = re.match(r'^//\s*BEGIN\s+([^\s]+)\s*$', line)
if m:
if fp_out:
fp_out.close()
fp_out = open(os.path.join(args.out_dir, m.group(1)), 'w')
elif fp_out:
fp_out.write(line)
args.input.close()
if fp_out:
fp_out.close()
| apache-2.0 |
chidea/GoPythonDLLWrapper | bin/lib/tkinter/test/test_ttk/test_functions.py | 79 | 17143 | # -*- encoding: utf-8 -*-
import unittest
import tkinter
from tkinter import ttk
class MockTkApp:
def splitlist(self, arg):
if isinstance(arg, tuple):
return arg
return arg.split(':')
def wantobjects(self):
return True
class MockTclObj(object):
typename = 'test'
def __init__(self, val):
self.val = val
def __str__(self):
return str(self.val)
class MockStateSpec(object):
typename = 'StateSpec'
def __init__(self, *args):
self.val = args
def __str__(self):
return ' '.join(self.val)
class InternalFunctionsTest(unittest.TestCase):
def test_format_optdict(self):
def check_against(fmt_opts, result):
for i in range(0, len(fmt_opts), 2):
self.assertEqual(result.pop(fmt_opts[i]), fmt_opts[i + 1])
if result:
self.fail("result still got elements: %s" % result)
# passing an empty dict should return an empty object (tuple here)
self.assertFalse(ttk._format_optdict({}))
# check list formatting
check_against(
ttk._format_optdict({'fg': 'blue', 'padding': [1, 2, 3, 4]}),
{'-fg': 'blue', '-padding': '1 2 3 4'})
# check tuple formatting (same as list)
check_against(
ttk._format_optdict({'test': (1, 2, '', 0)}),
{'-test': '1 2 {} 0'})
# check untouched values
check_against(
ttk._format_optdict({'test': {'left': 'as is'}}),
{'-test': {'left': 'as is'}})
# check script formatting
check_against(
ttk._format_optdict(
{'test': [1, -1, '', '2m', 0], 'test2': 3,
'test3': '', 'test4': 'abc def',
'test5': '"abc"', 'test6': '{}',
'test7': '} -spam {'}, script=True),
{'-test': '{1 -1 {} 2m 0}', '-test2': '3',
'-test3': '{}', '-test4': '{abc def}',
'-test5': '{"abc"}', '-test6': r'\{\}',
'-test7': r'\}\ -spam\ \{'})
opts = {'αβγ': True, 'á': False}
orig_opts = opts.copy()
# check if giving unicode keys is fine
check_against(ttk._format_optdict(opts), {'-αβγ': True, '-á': False})
# opts should remain unchanged
self.assertEqual(opts, orig_opts)
# passing values with spaces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one two', 'three')}),
{'-option': '{one two} three'})
check_against(
ttk._format_optdict(
{'option': ('one\ttwo', 'three')}),
{'-option': '{one\ttwo} three'})
# passing empty strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('', 'one')}),
{'-option': '{} one'})
# passing values with braces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one} {two', 'three')}),
{'-option': r'one\}\ \{two three'})
# passing quoted strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('"one"', 'two')}),
{'-option': '{"one"} two'})
check_against(
ttk._format_optdict(
{'option': ('{one}', 'two')}),
{'-option': r'\{one\} two'})
# ignore an option
amount_opts = len(ttk._format_optdict(opts, ignore=('á'))) / 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore non-existing options
amount_opts = len(ttk._format_optdict(opts, ignore=('á', 'b'))) / 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore every option
self.assertFalse(ttk._format_optdict(opts, ignore=list(opts.keys())))
def test_format_mapdict(self):
opts = {'a': [('b', 'c', 'val'), ('d', 'otherval'), ('', 'single')]}
result = ttk._format_mapdict(opts)
self.assertEqual(len(result), len(list(opts.keys())) * 2)
self.assertEqual(result, ('-a', '{b c} val d otherval {} single'))
self.assertEqual(ttk._format_mapdict(opts, script=True),
('-a', '{{b c} val d otherval {} single}'))
self.assertEqual(ttk._format_mapdict({2: []}), ('-2', ''))
opts = {'üñíćódè': [('á', 'vãl')]}
result = ttk._format_mapdict(opts)
self.assertEqual(result, ('-üñíćódè', 'á vãl'))
# empty states
valid = {'opt': [('', '', 'hi')]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '{ } hi'))
# when passing multiple states, they all must be strings
invalid = {'opt': [(1, 2, 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
invalid = {'opt': [([1], '2', 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
# but when passing a single state, it can be anything
valid = {'opt': [[1, 'value']]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '1 value'))
# special attention to single states which evaluate to False
for stateval in (None, 0, False, '', set()): # just some samples
valid = {'opt': [(stateval, 'value')]}
self.assertEqual(ttk._format_mapdict(valid),
('-opt', '{} value'))
# values must be iterable
opts = {'a': None}
self.assertRaises(TypeError, ttk._format_mapdict, opts)
# items in the value must have size >= 2
self.assertRaises(IndexError, ttk._format_mapdict,
{'a': [('invalid', )]})
def test_format_elemcreate(self):
self.assertEqual(ttk._format_elemcreate(None), (None, ()))
## Testing type = image
# image type expects at least an image name, so this should raise an
# IndexError since it tries to access index 0 of an empty tuple
self.assertRaises(IndexError, ttk._format_elemcreate, 'image')
# don't format returned values as a tcl script
# minimum acceptable for image type
self.assertEqual(ttk._format_elemcreate('image', False, 'test'),
("test ", ()))
# specifying a state spec
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('', 'a')), ("test {} a", ()))
# state spec with multiple states
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('a', 'b', 'c')), ("test {a b} c", ()))
# state spec and options
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('a', 'b'), a='x'), ("test a b", ("-a", "x")))
# format returned values as a tcl script
# state spec with multiple states and an option with a multivalue
self.assertEqual(ttk._format_elemcreate('image', True, 'test',
('a', 'b', 'c', 'd'), x=[2, 3]), ("{test {a b c} d}", "-x {2 3}"))
## Testing type = vsapi
# vsapi type expects at least a class name and a part_id, so this
# should raise a ValueError since it tries to get two elements from
# an empty tuple
self.assertRaises(ValueError, ttk._format_elemcreate, 'vsapi')
# don't format returned values as a tcl script
# minimum acceptable for vsapi
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b'),
("a b ", ()))
# now with a state spec with multiple states
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b', 'c')), ("a b {a b} c", ()))
# state spec and option
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b'), opt='x'), ("a b a b", ("-opt", "x")))
# format returned values as a tcl script
# state spec with a multivalue and an option
self.assertEqual(ttk._format_elemcreate('vsapi', True, 'a', 'b',
('a', 'b', [1, 2]), opt='x'), ("{a b {a b} {1 2}}", "-opt x"))
# Testing type = from
# from type expects at least a type name
self.assertRaises(IndexError, ttk._format_elemcreate, 'from')
self.assertEqual(ttk._format_elemcreate('from', False, 'a'),
('a', ()))
self.assertEqual(ttk._format_elemcreate('from', False, 'a', 'b'),
('a', ('b', )))
self.assertEqual(ttk._format_elemcreate('from', True, 'a', 'b'),
('{a}', 'b'))
def test_format_layoutlist(self):
def sample(indent=0, indent_size=2):
return ttk._format_layoutlist(
[('a', {'other': [1, 2, 3], 'children':
[('b', {'children':
[('c', {'children':
[('d', {'nice': 'opt'})], 'something': (1, 2)
})]
})]
})], indent=indent, indent_size=indent_size)[0]
def sample_expected(indent=0, indent_size=2):
spaces = lambda amount=0: ' ' * (amount + indent)
return (
"%sa -other {1 2 3} -children {\n"
"%sb -children {\n"
"%sc -something {1 2} -children {\n"
"%sd -nice opt\n"
"%s}\n"
"%s}\n"
"%s}" % (spaces(), spaces(indent_size),
spaces(2 * indent_size), spaces(3 * indent_size),
spaces(2 * indent_size), spaces(indent_size), spaces()))
# empty layout
self.assertEqual(ttk._format_layoutlist([])[0], '')
# _format_layoutlist always expects the second item (in every item)
# to act like a dict (except when the value evaluates to False).
self.assertRaises(AttributeError,
ttk._format_layoutlist, [('a', 'b')])
smallest = ttk._format_layoutlist([('a', None)], indent=0)
self.assertEqual(smallest,
ttk._format_layoutlist([('a', '')], indent=0))
self.assertEqual(smallest[0], 'a')
# testing indentation levels
self.assertEqual(sample(), sample_expected())
for i in range(4):
self.assertEqual(sample(i), sample_expected(i))
self.assertEqual(sample(i, i), sample_expected(i, i))
# invalid layout format, different kind of exceptions will be
# raised by internal functions
# plain wrong format
self.assertRaises(ValueError, ttk._format_layoutlist,
['bad', 'format'])
# will try to use iteritems in the 'bad' string
self.assertRaises(AttributeError, ttk._format_layoutlist,
[('name', 'bad')])
# bad children formatting
self.assertRaises(ValueError, ttk._format_layoutlist,
[('name', {'children': {'a': None}})])
def test_script_from_settings(self):
# empty options
self.assertFalse(ttk._script_from_settings({'name':
{'configure': None, 'map': None, 'element create': None}}))
# empty layout
self.assertEqual(
ttk._script_from_settings({'name': {'layout': None}}),
"ttk::style layout name {\nnull\n}")
configdict = {'αβγ': True, 'á': False}
self.assertTrue(
ttk._script_from_settings({'name': {'configure': configdict}}))
mapdict = {'üñíćódè': [('á', 'vãl')]}
self.assertTrue(
ttk._script_from_settings({'name': {'map': mapdict}}))
# invalid image element
self.assertRaises(IndexError,
ttk._script_from_settings, {'name': {'element create': ['image']}})
# minimal valid image
self.assertTrue(ttk._script_from_settings({'name':
{'element create': ['image', 'name']}}))
image = {'thing': {'element create':
['image', 'name', ('state1', 'state2', 'val')]}}
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} ")
image['thing']['element create'].append({'opt': 30})
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt 30")
image['thing']['element create'][-1]['opt'] = [MockTclObj(3),
MockTclObj('2m')]
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt {3 2m}")
def test_tclobj_to_py(self):
self.assertEqual(
ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')),
[('a', 'b', 'val')])
self.assertEqual(
ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]),
[1, 2, '3m'])
def test_list_from_statespec(self):
def test_it(sspec, value, res_value, states):
self.assertEqual(ttk._list_from_statespec(
(sspec, value)), [states + (res_value, )])
states_even = tuple('state%d' % i for i in range(6))
statespec = MockStateSpec(*states_even)
test_it(statespec, 'val', 'val', states_even)
test_it(statespec, MockTclObj('val'), 'val', states_even)
states_odd = tuple('state%d' % i for i in range(5))
statespec = MockStateSpec(*states_odd)
test_it(statespec, 'val', 'val', states_odd)
test_it(('a', 'b', 'c'), MockTclObj('val'), 'val', ('a', 'b', 'c'))
def test_list_from_layouttuple(self):
tk = MockTkApp()
# empty layout tuple
self.assertFalse(ttk._list_from_layouttuple(tk, ()))
# shortest layout tuple
self.assertEqual(ttk._list_from_layouttuple(tk, ('name', )),
[('name', {})])
# not so interesting ltuple
sample_ltuple = ('name', '-option', 'value')
self.assertEqual(ttk._list_from_layouttuple(tk, sample_ltuple),
[('name', {'option': 'value'})])
# empty children
self.assertEqual(ttk._list_from_layouttuple(tk,
('something', '-children', ())),
[('something', {'children': []})]
)
# more interesting ltuple
ltuple = (
'name', '-option', 'niceone', '-children', (
('otherone', '-children', (
('child', )), '-otheropt', 'othervalue'
)
)
)
self.assertEqual(ttk._list_from_layouttuple(tk, ltuple),
[('name', {'option': 'niceone', 'children':
[('otherone', {'otheropt': 'othervalue', 'children':
[('child', {})]
})]
})]
)
# bad tuples
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus', 'value'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('something', '-children')) # no children
def test_val_or_dict(self):
def func(res, opt=None, val=None):
if opt is None:
return res
if val is None:
return "test val"
return (opt, val)
tk = MockTkApp()
tk.call = func
self.assertEqual(ttk._val_or_dict(tk, {}, '-test:3'),
{'test': '3'})
self.assertEqual(ttk._val_or_dict(tk, {}, ('-test', 3)),
{'test': 3})
self.assertEqual(ttk._val_or_dict(tk, {'test': None}, 'x:y'),
'test val')
self.assertEqual(ttk._val_or_dict(tk, {'test': 3}, 'x:y'),
{'test': 3})
def test_convert_stringval(self):
tests = (
(0, 0), ('09', 9), ('a', 'a'), ('áÚ', 'áÚ'), ([], '[]'),
(None, 'None')
)
for orig, expected in tests:
self.assertEqual(ttk._convert_stringval(orig), expected)
class TclObjsToPyTest(unittest.TestCase):
def test_unicode(self):
adict = {'opt': 'välúè'}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': 'välúè'})
adict['opt'] = MockTclObj(adict['opt'])
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': 'välúè'})
def test_multivalues(self):
adict = {'opt': [1, 2, 3, 4]}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 2, 3, 4]})
adict['opt'] = [1, 'xm', 3]
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 'xm', 3]})
adict['opt'] = (MockStateSpec('a', 'b'), 'válũè')
self.assertEqual(ttk.tclobjs_to_py(adict),
{'opt': [('a', 'b', 'válũè')]})
self.assertEqual(ttk.tclobjs_to_py({'x': ['y z']}),
{'x': ['y z']})
def test_nosplit(self):
self.assertEqual(ttk.tclobjs_to_py({'text': 'some text'}),
{'text': 'some text'})
tests_nogui = (InternalFunctionsTest, TclObjsToPyTest)
if __name__ == "__main__":
from test.support import run_unittest
run_unittest(*tests_nogui)
| mit |
timou/volatility | volatility/plugins/mac/route.py | 44 | 3443 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import datetime
import volatility.obj as obj
import volatility.plugins.mac.common as common
class mac_route(common.AbstractMacCommand):
""" Prints the routing table """
def _get_table(self, tbl):
# Walk the BSD radix trie rooted at the table's tree top; internal
# nodes have rn_bit >= 0, leaves hold the route entries.
rnh = tbl
rn = rnh.rnh_treetop
# descend to the leftmost leaf
while rn.is_valid() and rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
rnhash = {}
while rn.is_valid():
base = rn
if rn in rnhash:
break # leaf already visited; the walk has cycled
rnhash[rn] = 1
# find the next leaf: climb while we are a right child
# (flag 2 is RNF_ROOT, marking the trie's sentinel nodes)
while rn.is_valid() and rn.rn_parent.rn_u.rn_node.rn_R == rn and rn.rn_flags & 2 == 0:
rn = rn.rn_parent
# step to the right sibling, then descend to its leftmost leaf
rn = rn.rn_parent.rn_u.rn_node.rn_R
while rn.is_valid() and rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
nextptr = rn
# emit every route entry on this leaf's duplicate-key chain
while base.v() != 0:
rn = base
base = rn.rn_u.rn_leaf.rn_Dupedkey
if rn.rn_flags & 2 == 0: # skip RNF_ROOT sentinels
rt = obj.Object("rtentry", offset = rn, vm = self.addr_space)
yield rt
rn = nextptr
if rn.rn_flags & 2 != 0:
break # reached the right-end sentinel; traversal complete
def calculate(self):
common.set_plugin_members(self)
tables_addr = self.addr_space.profile.get_symbol("_rt_tables")
## FIXME: if we only use ents[2] why do we need to instantiate 32?
# rt_tables is indexed by address family; index 2 is AF_INET (AF_INET == 2)
ents = obj.Object('Array', offset = tables_addr, vm = self.addr_space, targetType = 'Pointer', count = 32)
ipv4table = obj.Object("radix_node_head", offset = ents[2], vm = self.addr_space)
rts = self._get_table(ipv4table)
for rt in rts:
yield rt
def render_text(self, outfd, data):
self.table_header(outfd, [("Source IP", "24"),
("Dest. IP", "24"),
("Name", "^10"),
("Sent", "^18"),
("Recv", "^18"),
("Time", "^30"),
("Exp.", "^10"),
("Delta", "")])
for rt in data:
self.table_row(outfd,
rt.source_ip,
rt.dest_ip,
rt.name,
rt.sent, rt.rx,
rt.get_time(),
rt.rt_expire,
rt.delta)
| gpl-2.0 |