# ===== File: cherrypy/test/test_tools.py (repo: casanovainformationservices/LazyLibrarian, license: gpl-3.0) =====
"""Test the various means of instantiating and invoking tools."""
import gzip
import sys
import unittest
import io
from cherrypy._cpcompat import copyitems, itervalues
from cherrypy._cpcompat import IncompleteRead, ntob, ntou, xrange
import time
timeout = 0.2
import types
import six
import cherrypy
from cherrypy import tools
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
from cherrypy.test import helper


class ToolTests(helper.CPWebCase):

    def setup_server():
        # Put check_access in a custom toolbox with its own namespace
        myauthtools = cherrypy._cptools.Toolbox("myauth")

        def check_access(default=False):
            if not getattr(cherrypy.request, "userid", default):
                raise cherrypy.HTTPError(401)
        myauthtools.check_access = cherrypy.Tool(
            'before_request_body', check_access)

        def numerify():
            def number_it(body):
                for chunk in body:
                    for k, v in cherrypy.request.numerify_map:
                        chunk = chunk.replace(k, v)
                    yield chunk
            cherrypy.response.body = number_it(cherrypy.response.body)

        class NumTool(cherrypy.Tool):

            def _setup(self):
                def makemap():
                    m = self._merged_args().get("map", {})
                    cherrypy.request.numerify_map = copyitems(m)
                cherrypy.request.hooks.attach('on_start_resource', makemap)

                def critical():
                    cherrypy.request.error_response = cherrypy.HTTPError(
                        502).set_response
                critical.failsafe = True
                cherrypy.request.hooks.attach('on_start_resource', critical)

                cherrypy.request.hooks.attach(self._point, self.callable)
        tools.numerify = NumTool('before_finalize', numerify)

        # It's not mandatory to inherit from cherrypy.Tool.
        class NadsatTool:

            def __init__(self):
                self.ended = {}
                self._name = "nadsat"

            def nadsat(self):
                def nadsat_it_up(body):
                    for chunk in body:
                        chunk = chunk.replace(ntob("good"), ntob("horrorshow"))
                        chunk = chunk.replace(ntob("piece"), ntob("lomtick"))
                        yield chunk
                cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
            nadsat.priority = 0

            def cleanup(self):
                # This runs after the request has been completely written out.
                cherrypy.response.body = [ntob("razdrez")]
                id = cherrypy.request.params.get("id")
                if id:
                    self.ended[id] = True
            cleanup.failsafe = True

            def _setup(self):
                cherrypy.request.hooks.attach('before_finalize', self.nadsat)
                cherrypy.request.hooks.attach('on_end_request', self.cleanup)
        tools.nadsat = NadsatTool()

        def pipe_body():
            cherrypy.request.process_request_body = False
            clen = int(cherrypy.request.headers['Content-Length'])
            cherrypy.request.body = cherrypy.request.rfile.read(clen)

        # Assert that we can use a callable object instead of a function.
        class Rotator(object):

            def __call__(self, scale):
                r = cherrypy.response
                r.collapse_body()
                if six.PY3:
                    r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
                else:
                    r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]]
        cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())

        def stream_handler(next_handler, *args, **kwargs):
            assert cherrypy.request.config.get('tools.streamer.arg') == 'arg value'
            cherrypy.response.output = o = io.BytesIO()
            try:
                response = next_handler(*args, **kwargs)
                # Ignore the response and return our accumulated output
                # instead.
                return o.getvalue()
            finally:
                o.close()
        cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
            stream_handler)
        class Root:

            @cherrypy.expose
            def index(self):
                return "Howdy earth!"

            @cherrypy.expose
            @cherrypy.config(**{'tools.streamer.on': True,
                                'tools.streamer.arg': 'arg value'})
            def tarfile(self):
                assert cherrypy.request.config.get('tools.streamer.arg') == 'arg value'
                cherrypy.response.output.write(ntob('I am '))
                cherrypy.response.output.write(ntob('a tarfile'))

            @cherrypy.expose
            def euro(self):
                hooks = list(cherrypy.request.hooks['before_finalize'])
                hooks.sort()
                cbnames = [x.callback.__name__ for x in hooks]
                assert cbnames == ['gzip'], cbnames
                priorities = [x.priority for x in hooks]
                assert priorities == [80], priorities
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode

            # Bare hooks
            @cherrypy.expose
            @cherrypy.config(**{'hooks.before_request_body': pipe_body})
            def pipe(self):
                return cherrypy.request.body

            # Multiple decorators; include kwargs just for fun.
            # Note that rotator must run before gzip.
            @cherrypy.expose
            def decorated_euro(self, *vpath):
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode
            decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
            decorated_euro = tools.rotator(scale=3)(decorated_euro)

        root = Root()

        class TestType(type):
            """Metaclass which automatically exposes all functions in each
            subclass, and adds an instance of the subclass as an attribute
            of root.
            """
            def __init__(cls, name, bases, dct):
                type.__init__(cls, name, bases, dct)
                for value in itervalues(dct):
                    if isinstance(value, types.FunctionType):
                        cherrypy.expose(value)
                setattr(root, name.lower(), cls())
        Test = TestType('Test', (object,), {})
        # METHOD ONE:
        # Declare Tools in _cp_config
        @cherrypy.config(**{"tools.nadsat.on": True})
        class Demo(Test):

            def index(self, id=None):
                return "A good piece of cherry pie"

            def ended(self, id):
                return repr(tools.nadsat.ended[id])

            def err(self, id=None):
                raise ValueError()

            def errinstream(self, id=None):
                yield "nonconfidential"
                raise ValueError()
                yield "confidential"

            # METHOD TWO: decorator using Tool()
            # We support Python 2.3, but the @-deco syntax would look like
            # this:
            # @tools.check_access()
            def restricted(self):
                return "Welcome!"
            restricted = myauthtools.check_access()(restricted)
            userid = restricted

            def err_in_onstart(self):
                return "success!"

            @cherrypy.config(**{'response.stream': True})
            def stream(self, id=None):
                for x in xrange(100000000):
                    yield str(x)

        conf = {
            # METHOD THREE:
            # Declare Tools in detached config
            '/demo': {
                'tools.numerify.on': True,
                'tools.numerify.map': {ntob("pie"): ntob("3.14159")},
            },
            '/demo/restricted': {
                'request.show_tracebacks': False,
            },
            '/demo/userid': {
                'request.show_tracebacks': False,
                'myauth.check_access.default': True,
            },
            '/demo/errinstream': {
                'response.stream': True,
            },
            '/demo/err_in_onstart': {
                # Because this isn't a dict, on_start_resource will error.
                'tools.numerify.map': "pie->3.14159"
            },
            # Combined tools
            '/euro': {
                'tools.gzip.on': True,
                'tools.encode.on': True,
            },
            # Priority specified in config
            '/decorated_euro/subpath': {
                'tools.gzip.priority': 10,
            },
            # Handler wrappers
            '/tarfile': {'tools.streamer.on': True}
        }
        app = cherrypy.tree.mount(root, config=conf)
        app.request_class.namespaces['myauth'] = myauthtools

        if sys.version_info >= (2, 5):
            from cherrypy.test import _test_decorators
            root.tooldecs = _test_decorators.ToolExamples()
    setup_server = staticmethod(setup_server)
    def testHookErrors(self):
        self.getPage("/demo/?id=1")
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertBody("A horrorshow lomtick of cherry 3.14159")
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/1")
        self.assertBody("True")

        valerr = '\n raise ValueError()\nValueError'
        self.getPage("/demo/err?id=3")
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertErrorPage(502, pattern=valerr)
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/3")
        self.assertBody("True")

        # If body is "razdrez", then on_end_request is being called too early.
        if (cherrypy.server.protocol_version == "HTTP/1.0" or
                getattr(cherrypy.server, "using_apache", False)):
            self.getPage("/demo/errinstream?id=5")
            # Because this error is raised after the response body has
            # started, the status should not change to an error status.
            self.assertStatus("200 OK")
            self.assertBody("nonconfidential")
        else:
            # Because this error is raised after the response body has
            # started, and because it's chunked output, an error is raised by
            # the HTTP client when it encounters incomplete output.
            self.assertRaises((ValueError, IncompleteRead), self.getPage,
                              "/demo/errinstream?id=5")
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/5")
        self.assertBody("True")

        # Test the "__call__" technique (compile-time decorator).
        self.getPage("/demo/restricted")
        self.assertErrorPage(401)

        # Test compile-time decorator with kwargs from config.
        self.getPage("/demo/userid")
        self.assertBody("Welcome!")

    def testEndRequestOnDrop(self):
        old_timeout = None
        try:
            httpserver = cherrypy.server.httpserver
            old_timeout = httpserver.timeout
        except (AttributeError, IndexError):
            return self.skip()

        try:
            httpserver.timeout = timeout

            # Test that on_end_request is called even if the client drops.
            self.persistent = True
            try:
                conn = self.HTTP_CONN
                conn.putrequest("GET", "/demo/stream?id=9", skip_host=True)
                conn.putheader("Host", self.HOST)
                conn.endheaders()
                # Skip the rest of the request and close the conn. This will
                # cause the server's active socket to error, which *should*
                # result in the request being aborted, and request.close being
                # called all the way up the stack (including WSGI middleware),
                # eventually calling our on_end_request hook.
            finally:
                self.persistent = False
            time.sleep(timeout * 2)

            # Test that the on_end_request hook was called.
            self.getPage("/demo/ended/9")
            self.assertBody("True")
        finally:
            if old_timeout is not None:
                httpserver.timeout = old_timeout

    def testGuaranteedHooks(self):
        # The 'critical' on_start_resource hook is 'failsafe' (guaranteed
        # to run even if there are failures in other on_start methods).
        # This is NOT true of the other hooks.
        # Here, we have set up a failure in NumerifyTool.numerify_map,
        # but our 'critical' hook should run and set the error to 502.
        self.getPage("/demo/err_in_onstart")
        self.assertErrorPage(502)
        self.assertInBody(
            "AttributeError: 'str' object has no attribute 'items'")

    def testCombinedTools(self):
        expectedResult = (ntou("Hello,world") +
                          europoundUnicode).encode('utf-8')
        zbuf = io.BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
        zfile.write(expectedResult)
        zfile.close()
        self.getPage("/euro",
                     headers=[
                         ("Accept-Encoding", "gzip"),
                         ("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")])
        self.assertInBody(zbuf.getvalue()[:3])

        zbuf = io.BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
        zfile.write(expectedResult)
        zfile.close()
        self.getPage("/decorated_euro", headers=[("Accept-Encoding", "gzip")])
        self.assertInBody(zbuf.getvalue()[:3])

        # This returns a different value because gzip's priority was
        # lowered in conf, allowing the rotator to run after gzip.
        # Of course, we don't want breakage in production apps,
        # but it proves the priority was changed.
        self.getPage("/decorated_euro/subpath",
                     headers=[("Accept-Encoding", "gzip")])
        if six.PY3:
            self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))
        else:
            self.assertInBody(''.join([chr((ord(x) + 3) % 256)
                                       for x in zbuf.getvalue()]))

    def testBareHooks(self):
        content = "bit of a pain in me gulliver"
        self.getPage("/pipe",
                     headers=[("Content-Length", str(len(content))),
                              ("Content-Type", "text/plain")],
                     method="POST", body=content)
        self.assertBody(content)

    def testHandlerWrapperTool(self):
        self.getPage("/tarfile")
        self.assertBody("I am a tarfile")

    def testToolWithConfig(self):
        if not sys.version_info >= (2, 5):
            return self.skip("skipped (Python 2.5+ only)")
        self.getPage('/tooldecs/blah')
        self.assertHeader('Content-Type', 'application/data')

    def testWarnToolOn(self):
        # get
        try:
            cherrypy.tools.numerify.on
        except AttributeError:
            pass
        else:
            raise AssertionError("Tool.on did not error as it should have.")

        # set
        try:
            cherrypy.tools.numerify.on = True
        except AttributeError:
            pass
        else:
            raise AssertionError("Tool.on did not error as it should have.")

    def testDecorator(self):
        @cherrypy.tools.register('on_start_resource')
        def example():
            pass
        self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
        self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')

        @cherrypy.tools.register('before_finalize', name='renamed',
                                 priority=60)
        def example():
            pass
        self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
        self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
        self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
        self.assertEqual(cherrypy.tools.renamed._priority, 60)


class SessionAuthTest(unittest.TestCase):

    def test_login_screen_returns_bytes(self):
        """
        login_screen must return bytes even if unicode parameters are passed.
        Issue 1132 revealed that login_screen would return unicode if the
        username and password were unicode.
        """
        sa = cherrypy.lib.cptools.SessionAuth()
        res = sa.login_screen(None, username=six.text_type('nobody'),
                              password=six.text_type('anypass'))
        self.assertTrue(isinstance(res, bytes))


# ===== File: py/inception.py (repo: illmade/image-node, license: apache-2.0) =====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings all inception models under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from inception_v4 import inception_v4
from inception_v4 import inception_v4_arg_scope
from inception_v4 import inception_v4_base
# pylint: enable=unused-import


# ===== File: sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/aio/_configuration.py (repo: Azure/azure-sdk-for-python, license: mit) =====
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from azure.core.credentials_async import AsyncTokenCredential


class SubscriptionClientConfiguration(Configuration):
    """Configuration for SubscriptionClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        **kwargs: Any
    ) -> None:
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        super(SubscriptionClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.api_version = "2016-06-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)


# ===== File: scons/scons-local-2.1.0/SCons/Scanner/Prog.py (repo: ParticulateFlow/Palabos-PFM, license: agpl-3.0) =====
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/Prog.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
# global, set by --debug=findlibs
print_find_libs = None

def ProgramScanner(**kw):
    """Return a prototype Scanner instance for scanning executable
    files for static-lib dependencies"""
    kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
    ps = SCons.Scanner.Base(scan, "ProgramScanner", **kw)
    return ps


def scan(node, env, libpath = ()):
    """
    This scanner scans program files for static-library
    dependencies. It will search the LIBPATH environment variable
    for libraries specified in the LIBS variable, returning any
    files it finds as dependencies.
    """
    try:
        libs = env['LIBS']
    except KeyError:
        # There are no LIBS in this environment, so just return a null list:
        return []
    if SCons.Util.is_String(libs):
        libs = libs.split()
    else:
        libs = SCons.Util.flatten(libs)

    try:
        prefix = env['LIBPREFIXES']
        if not SCons.Util.is_List(prefix):
            prefix = [ prefix ]
    except KeyError:
        prefix = [ '' ]

    try:
        suffix = env['LIBSUFFIXES']
        if not SCons.Util.is_List(suffix):
            suffix = [ suffix ]
    except KeyError:
        suffix = [ '' ]

    pairs = []
    for suf in map(env.subst, suffix):
        for pref in map(env.subst, prefix):
            pairs.append((pref, suf))

    result = []

    if callable(libpath):
        libpath = libpath()

    find_file = SCons.Node.FS.find_file
    adjustixes = SCons.Util.adjustixes
    for lib in libs:
        if SCons.Util.is_String(lib):
            lib = env.subst(lib)
            for pref, suf in pairs:
                l = adjustixes(lib, pref, suf)
                l = find_file(l, libpath, verbose=print_find_libs)
                if l:
                    result.append(l)
        else:
            result.append(lib)

    return result
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:


# ===== File: register/migrations/0001_initial.py (repo: joedborg/fusilier, license: agpl-3.0) =====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-02 16:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion

class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('member', '0001_initial'),
        ('firearm', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Visit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('range', models.IntegerField(choices=[(0, 'Bicester'), (1, 'Chesterton')])),
                ('timestamp', models.DateTimeField(auto_now=True)),
                ('session', models.IntegerField(choices=[(0, 'Practice'), (1, 'Competition')])),
                ('firearm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='firearm.Firearm')),
                ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='member.Member')),
            ],
        ),
    ]


# ===== File: mamba/scripts/_controller.py (repo: PyMamba/mamba-framework, license: gpl-3.0) =====
# -*- test-case-name: mamba.scripts.test.test_mamba_admin -*-
# Copyright (c) 2012 - 2013 Oscar Campos <[email protected]>
# See LICENSE for more details
from __future__ import print_function
import re
import sys
import getpass
import datetime
from string import Template
from twisted.python import usage, filepath
from mamba import copyright
from mamba.scripts import commons
from mamba._version import versions
from mamba.utils.checkers import Checkers
from mamba.utils.camelcase import CamelCase
# This is an auto-generated property. Do not edit it.
version = versions.Version('controller', 0, 1, 8)

def show_version():
    print('Mamba Controller Tools v{}'.format(version.short()))
    print(format(copyright.copyright))


def mamba_services_not_found():
    print(
        'error: make sure you are inside a mamba application root '
        'directory and then run this command again'
    )
    sys.exit(-1)

class ControllerOptions(usage.Options):
    """Controller Configuration options for mamba-admin tool
    """
    synopsis = '[options] name'

    optFlags = [
        ['dump', 'd', 'Dump to the standard output'],
        ['noquestions', 'n',
         'When this option is set, mamba will NOT ask anything to the user '
         'that means it will overwrite any other version of the controller '
         'file that already exists in the file system. Use with caution']
    ]

    optParameters = [
        ['description', None, None, 'Controller\'s description'],
        ['author', None, None, 'Controller\'s author'],
        ['email', None, None, 'Author\'s email'],
        ['route', None, None, 'Controller\'s register route (if any)'],
        ['classname', None, None,
         'Set this parameter if you want that your new controller use a '
         'specific class name'],
        ['platforms', None, None,
         'Supported platforms (example: \'Unix, Windows\')']
    ]

    def opt_version(self):
        """Show version information and exit
        """
        show_version()
        sys.exit(0)

    def parseArgs(self, name=None):
        """Parse command arguments
        """
        if name is None:
            self['name'] = name
            return

        regex = re.compile(r'[^._a-zA-Z0-9]')
        name = regex.sub('', name)

        path, name = commons.process_path_name(name)
        self['filename'] = filepath.joinpath(path.lower(), name.lower())
        self['name'] = CamelCase(name.replace('_', ' ')).camelize(True)

    def postOptions(self):
        """Post options processing
        """
        if self['author'] is None:
            self['author'] = getpass.getuser()

        if self['email'] is not None:
            if not Checkers.check_email(self['email']):
                print(
                    'error: the given email address {} is not a valid RFC2822 '
                    'email address, '
                    'check http://www.rfc-editor.org/rfc/rfc2822.txt for '
                    'very extended details'.format(self['email'])
                )
                sys.exit(-1)
        else:
            # just set an invalid RFC2822 email address (that's what irony means)
            self['email'] = '{}@localhost'.format(self['author'])

        if self['route'] is None:
            self['route'] = ''

        if self['platforms'] is None:
            self['platforms'] = 'Linux'

class Controller(object):
    """
    Controller creation tool

    :param options: the command line options
    :type options: :class:`~mamba.scripts._controller.ControllerOptions`
    """

    def __init__(self, options):
        self.options = options

        self.process()

    def process(self):
        """I process the Controller commands
        """

        try:
            mamba_services = commons.import_services()
            del mamba_services
        except Exception:
            mamba_services_not_found()

        if self.options.subOptions.opts['name'] is None:
            print(self.options.subOptions)
            sys.exit(-1)

        if self.options.subOptions.opts['dump']:
            self._dump_controller()
            sys.exit(0)

        self._write_controller()
        sys.exit(0)

    def _dump_controller(self):
        """Dump the controller to the standard output
        """

        print('\n')
        print(self._process_template())

    @commons.decorate_output
    def _write_controller(self):
        """Write the controller to a file in the file system
        """

        controller_file = filepath.FilePath(
            'application/controller/{}.py'.format(
                self.options.subOptions.opts['filename'])
        )

        if controller_file.exists():
            if commons.Interaction.userquery(
                '{} file already exists in the file system.'
                'Are you really sure do you want to overwrite it?'.format(
                    controller_file.path
                )
            ) == 'No':
                return

        print('Writing the controller...'.ljust(73), end='')
        try:
            controller_file.open('w').write(self._process_template())
        except IOError:
            # package directory doesn't exist yet
            commons.generate_sub_packages(controller_file)
            controller_file.open('w').write(self._process_template())

    def _process_template(self):
        """Prepare the template to write/dump
        """

        sep = filepath.os.sep  # windows needs '\\' as separator
        controller_template = Template(
            filepath.FilePath('{}/templates/controller.tpl'.format(
                '/'.join(filepath.dirname(__file__).split(sep)[:-1])
            )).open('r').read()
        )

        if self.options.subOptions.opts['classname'] is None:
            classname = self.options.subOptions.opts['name']
        else:
            classname = self.options.subOptions.opts['classname']

        args = {
            'year': datetime.datetime.now().year,
            'controller_name': self.options.subOptions.opts['name'],
            'platforms': self.options.subOptions.opts['platforms'],
            'synopsis': self.options.subOptions.opts['description'],
            'author': self.options.subOptions.opts['author'],
            'author_email': self.options.subOptions.opts['email'],
            'controller_class': classname,
            'register_path': self.options.subOptions.opts['route']
        }

        return controller_template.safe_substitute(**args)


# ===== File: test/test_multi_attribute.py (repo: ddurieux/alignak, license: agpl-3.0) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Grégory Starck, [email protected]
# Christophe Simon, [email protected]
# Jean Gabes, [email protected]
# Sebastien Coavoux, [email protected]
# Christophe SIMON, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test multi valued attribute feature.
#
import re
from alignak_test import unittest, AlignakTest

class TestMultiVuledAttributes(AlignakTest):

    def setUp(self):
        self.setup_with_file('etc/alignak_multi_attribute.cfg')

    def test_multi_valued_attributes(self):
        hst1 = self.sched.hosts.find_by_name("test_host_01")
        srv1 = self.sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
        self.assertIsNot(hst1, None)
        self.assertIsNot(srv1, None)

        # inherited parameter
        self.assertIs(True, hst1.active_checks_enabled)
        self.assertIs(True, srv1.active_checks_enabled)

        # non list parameter (only the last value set should remain)
        self.assertEqual(3, hst1.max_check_attempts)
        self.assertEqual(3, srv1.max_check_attempts)

        # list parameter (all items should appear in the order they are defined)
        self.assertEqual([u'd', u'f', u'1', u's', u'r', u'u'], list(set(hst1.notification_options)))
        self.assertEqual([u'c', u'f', u'1', u's', u'r', u'u', u'w'], list(set(srv1.notification_options)))


if __name__ == '__main__':
    unittest.main()


# ===== File: django_mako_plus/provider/context.py (repo: doconix/django-mako-plus, license: apache-2.0) =====
from django.utils.module_loading import import_string
import json
import logging
from ..version import __version__
from ..util import log
from .base import BaseProvider
###################################
### JS Context Provider

class JsContextProvider(BaseProvider):
    '''
    Adds all js_context() variables to DMP_CONTEXT.
    '''
    DEFAULT_OPTIONS = {
        # the group this provider is part of. this only matters when
        # the html page limits the providers that will be called with
        # ${ django_mako_plus.links(group="...") }
        'group': 'scripts',

        # the encoder to use for the JSON structure
        'encoder': 'django.core.serializers.json.DjangoJSONEncoder',
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.encoder = import_string(self.options['encoder'])
        if log.isEnabledFor(logging.DEBUG):
            log.debug('%s created', repr(self))

    def provide(self):
        # we output on the first run through - the context is only needed once
        if not self.is_first():
            return

        # generate the context dictionary
        data = {
            'id': self.provider_run.uid,
            'version': __version__,
            'templates': [ '{}/{}'.format(p.app_config.name, p.template_relpath) for p in self.iter_related() ],
            'app': self.provider_run.request.dmp.app if self.provider_run.request is not None else None,
            'page': self.provider_run.request.dmp.page if self.provider_run.request is not None else None,
            'log': log.isEnabledFor(logging.DEBUG),
            'values': {
                'id': self.provider_run.uid,
            },
        }
        for k in self.provider_run.context.keys():
            if isinstance(k, jscontext):
                value = self.provider_run.context[k]
                data['values'][k] = value.__jscontext__() if callable(getattr(value, '__jscontext__', None)) else value

        # output the script
        self.write('<script>')
        self.write('DMP_CONTEXT.set({data});'.format(
            data=json.dumps(data, cls=self.encoder, separators=(',', ':'))
        ))
        self.write('</script>')


class jscontext(str):
    '''
    Marks a key in the context dictionary as a JS context item.
    JS context items are sent to the template like normal,
    but they are also added to the runtime JS namespace.

    See the tutorial for more information on this function.
    '''
    # no code needed, just using the class for identity
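
# A hypothetical usage sketch (the view below is illustrative and not part of
# this module): keys wrapped in jscontext() are mirrored into DMP_CONTEXT on
# the client by JsContextProvider above, while plain keys stay template-only.
#
#     from django_mako_plus import view_function, jscontext
#
#     @view_function
#     def process_request(request):
#         context = {
#             jscontext('user_count'): 42,  # also available to JS
#             'title': 'Dashboard',         # template-only
#         }
#         return request.dmp.render('dashboard.html', context)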


# ===== File: tools/kconfig_new/kconfiglib.py (repo: mashaoze/esp-idf, license: ISC) =====
# Copyright (c) 2011-2017, Ulf Magnusson
# Modifications (c) 2018 Espressif Systems
# SPDX-License-Identifier: ISC
#
# ******* IMPORTANT **********
#
# This is kconfiglib 2.1.0 with some modifications to match the behaviour
# of the ESP-IDF kconfig:
#
# - 'source' nows uses wordexp(3) behaviour to allow source-ing multiple
# files at once, and to expand environment variables directly in the source
# command (without them having to be set as properties in the Kconfig file)
#
# - Added walk_menu() function and refactored to use this internally.
#
# - BOOL & TRISTATE items are allowed to have blank values in .config
# (equivalent to n, this is backwards compatibility with old IDF conf.c)
#
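#
# For example, with the last modification a .config line such as
#
#     CONFIG_FOO=
#
# is accepted and treated as CONFIG_FOO=n for bool/tristate symbols,
# instead of being rejected with a warning (see _load_config() below).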
"""
Overview
========
Kconfiglib is a Python 2/3 library for scripting and extracting information
from Kconfig configuration systems. It can be used for the following, among
other things:

 - Programmatically get and set symbol values

   allnoconfig.py and allyesconfig.py examples are provided, automatically
   verified to produce identical output to the standard 'make allnoconfig' and
   'make allyesconfig'.

 - Read and write .config files

   The generated .config files are character-for-character identical to what
   the C implementation would generate (except for the header comment). The
   test suite relies on this, as it compares the generated files.

 - Inspect symbols

   Printing a symbol gives output which could be fed back into a Kconfig parser
   to redefine it***. The printing function (__str__()) is implemented with
   public APIs, meaning you can fetch just whatever information you need as
   well.

   A helpful __repr__() is implemented on all objects too, also implemented
   with public APIs.

   ***Choice symbols get their parent choice as a dependency, which shows up as
   e.g. 'prompt "choice symbol" if <choice>' when printing the symbol. This
   could easily be worked around if 100% reparsable output is needed.

 - Inspect expressions

   Expressions use a simple tuple-based format that can be processed manually
   if needed. Expression printing and evaluation functions are provided,
   implemented with public APIs.

 - Inspect the menu tree

   The underlying menu tree is exposed, including submenus created implicitly
   from symbols depending on preceding symbols. This can be used e.g. to
   implement menuconfig-like functionality. See the menuconfig.py example.

Here are some other features:

 - Single-file implementation

   The entire library is contained in this file.

 - Runs unmodified under both Python 2 and Python 3

   The code mostly uses basic Python features and has no third-party
   dependencies. The most advanced things used are probably @property and
   __slots__.

 - Robust and highly compatible with the standard Kconfig C tools

   The test suite automatically compares output from Kconfiglib and the C tools
   by diffing the generated .config files for the real kernel Kconfig and
   defconfig files, for all ARCHes.

   This currently involves comparing the output for 36 ARCHes and 498 defconfig
   files (or over 18000 ARCH/defconfig combinations in "obsessive" test suite
   mode). All tests are expected to pass.

 - Not horribly slow despite being a pure Python implementation

   The allyesconfig.py example currently runs in about 1.6 seconds on a Core i7
   2600K (with a warm file cache), where half a second is overhead from 'make
   scriptconfig' (see below).

   For long-running jobs, PyPy gives a big performance boost. CPython is faster
   for short-running jobs as PyPy needs some time to warm up.

 - Internals that (mostly) mirror the C implementation

   While being simpler to understand.

Using Kconfiglib on the Linux kernel with the Makefile targets
==============================================================

For the Linux kernel, a handy interface is provided by the
scripts/kconfig/Makefile patch. Apply it with either 'git am' or the 'patch'
utility:

  $ wget -qO- https://raw.githubusercontent.com/ulfalizer/Kconfiglib/master/makefile.patch | git am
  $ wget -qO- https://raw.githubusercontent.com/ulfalizer/Kconfiglib/master/makefile.patch | patch -p1

Warning: Not passing -p1 to patch will cause the wrong file to be patched.

Please tell me if the patch does not apply. It should be trivial to apply
manually, as it's just a block of text that needs to be inserted near the other
*conf: targets in scripts/kconfig/Makefile.

If you do not wish to install Kconfiglib via pip, the Makefile patch is set up
so that you can also just clone Kconfiglib into the kernel root:

  $ git clone git://github.com/ulfalizer/Kconfiglib.git
  $ git am Kconfiglib/makefile.patch (or 'patch -p1 < Kconfiglib/makefile.patch')

Warning: The directory name Kconfiglib/ is significant in this case, because
it's added to PYTHONPATH by the new targets in makefile.patch.

Look further down for a motivation for the Makefile patch and for instructions
on how you can use Kconfiglib without it.

The Makefile patch adds the following targets:

make [ARCH=<arch>] iscriptconfig
--------------------------------

This target gives an interactive Python prompt where a Kconfig instance has
been preloaded and is available in 'kconf'. To change the Python interpreter
used, pass PYTHONCMD=<executable> to make. The default is "python".

To get a feel for the API, try evaluating and printing the symbols in
kconf.defined_syms, and explore the MenuNode menu tree starting at
kconf.top_node by following 'next' and 'list' pointers.

The item contained in a menu node is found in MenuNode.item (note that this can
be one of the constants MENU and COMMENT), and all symbols and choices have a
'nodes' attribute containing their menu nodes (usually only one). Printing a
menu node will print its item, in Kconfig format.

If you want to look up a symbol by name, use the kconf.syms dictionary.
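
As a rough sketch of such a session (the MODULES symbol is just an example;
any defined symbol works):

  $ make iscriptconfig
  >>> kconf.syms["MODULES"].tri_value
  0
  >>> node = kconf.top_node.list  # first node in the top-level menu
  >>> node.item                   # a Symbol/Choice, or MENU/COMMENT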

make scriptconfig SCRIPT=<script> [SCRIPT_ARG=<arg>]
----------------------------------------------------

This target runs the Python script given by the SCRIPT parameter on the
configuration. sys.argv[1] holds the name of the top-level Kconfig file
(currently always "Kconfig" in practice), and sys.argv[2] holds the SCRIPT_ARG
argument, if given.

See the examples/ subdirectory for example scripts.

Using Kconfiglib without the Makefile targets
=============================================

The make targets are only needed for a trivial reason: The Kbuild makefiles
export environment variables which are referenced inside the Kconfig files (via
'option env="ENV_VARIABLE"').

In practice, the only variables referenced (as of writing, and for many years)
are ARCH, SRCARCH, and KERNELVERSION. To run Kconfiglib without the Makefile
patch, do this:

  $ ARCH=x86 SRCARCH=x86 KERNELVERSION=`make kernelversion` python
  >>> import kconfiglib
  >>> kconf = kconfiglib.Kconfig()  # filename defaults to "Kconfig"

Search the top-level Makefile for "Additional ARCH settings" to see other
possibilities for ARCH and SRCARCH. Kconfiglib will print a warning if an unset
environment variable is referenced inside the Kconfig files.

Gotcha
******

It's important to set $SRCARCH even if you don't care about values and only
want to extract information from Kconfig files, because the top-level Makefile
does this (as of writing):

  source "arch/$SRCARCH/Kconfig"

If $SRCARCH is not set, this expands to "arch//Kconfig", and arch/Kconfig
happens to be an existing file, giving something that appears to work but is
actually a truncated configuration. The available symbols will differ depending
on the arch as well.

Intro to symbol values
======================

Kconfiglib has the same assignment semantics as the C implementation.

Any symbol can be assigned a value by the user (via Kconfig.load_config() or
Symbol.set_value()), but this user value is only respected if the symbol is
visible, which corresponds to it (currently) being visible in the menuconfig
interface.

Symbols without prompts are never visible (setting a user value on them is
pointless). For symbols with prompts, the visibility of the symbol is
determined by the condition on the prompt.

Dependencies from parents and 'if'/'depends on' are propagated to properties,
including prompts, so these two configurations are logically equivalent:

  (1)

  menu "menu"
      depends on A

  if B

  config FOO
      tristate "foo" if D
      default y
      depends on C

  endif

  endmenu

  (2)

  menu "menu"
      depends on A

  config FOO
      tristate "foo" if A && B && C && D
      default y if A && B && C

  endmenu

In this example, A && B && C && D (the prompt condition) needs to be non-n for
FOO to be visible (assignable). If the value is m, the symbol can only be
assigned the value m. The visibility sets an upper bound on the value that can
be assigned by the user, and any higher user value will be truncated down.

'default' properties are independent of the visibility, though a 'default' will
often get the same condition as the prompt due to dependency propagation.
'default' properties are used if the symbol is not visible or has no user
value.

Symbols with no (active) user value and no (active) 'default' default to n for
bool/tristate symbols, and to the empty string for other symbols.

'select' works similarly to symbol visibility, but sets a lower bound on the
value of the symbol. The lower bound is determined by the value of the
select*ing* symbol. 'select' does not respect visibility, so non-visible
symbols can be forced to a particular (minimum) value by a select as well.

For non-bool/tristate symbols, it only matters whether the visibility is n or
non-n: m visibility acts the same as y visibility.

Conditions on 'default' and 'select' work in mostly intuitive ways. If the
condition is n, the 'default' or 'select' is disabled. If it is m, the
'default' or 'select' value (the value of the selecting symbol) is truncated
down to m.

When writing a configuration with Kconfig.write_config(), only symbols that are
visible, have an (active) default, or are selected will get written out (note
that this includes all symbols that would accept user values). Kconfiglib
matches the .config format produced by the C implementations down to the
character. This eases testing.

In Kconfiglib, the set of (currently) assignable values for a bool/tristate
symbol appear in Symbol.assignable. For other symbol types, just check if
sym.visibility is non-0 (non-n).
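
As a small, illustrative sketch (FOO is a made-up bool symbol with a prompt;
set_value() returns True if the value was accepted):

  >>> import kconfiglib
  >>> kconf = kconfiglib.Kconfig("Kconfig")
  >>> foo = kconf.syms["FOO"]
  >>> foo.tri_value       # 0/1/2 meaning n/m/y
  0
  >>> foo.assignable      # values currently assignable by the user
  (0, 2)
  >>> foo.set_value(2)
  True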

Intro to the menu tree
======================

The menu structure, as seen in e.g. menuconfig, is represented by a tree of
MenuNode objects. The top node of the configuration corresponds to an implicit
top-level menu, the title of which is shown at the top in the standard
menuconfig interface. (The title with variables expanded is available in
Kconfig.mainmenu_text in Kconfiglib.)

The top node is found in Kconfig.top_node. From there, you can visit child menu
nodes by following the 'list' pointer, and any following menu nodes by
following the 'next' pointer. Usually, a non-None 'list' pointer indicates a
menu or Choice, but menu nodes for symbols can sometimes have a non-None 'list'
pointer too due to submenus created implicitly from dependencies.

MenuNode.item is either a Symbol or a Choice object, or one of the constants
MENU and COMMENT. The prompt of the menu node (which also holds the text for
menus and comments) can be found in MenuNode.prompt. For Symbol and Choice,
MenuNode.help holds the help text (if any, otherwise None).

Note that prompts and help texts for symbols and choices are stored in the menu
node. This makes it possible to define a symbol in multiple locations with a
different prompt or help text in each location.

This organization mirrors the C implementation. MenuNode is called
'struct menu' there, but I thought "menu" was a confusing name.

The list of menu nodes for a Symbol or Choice can be found in the
Symbol/Choice.nodes attribute.

It is possible to give a Choice a name and define it in multiple locations,
hence why Choice.nodes is a list. In practice, you're unlikely to ever see a
choice defined in more than one location. I don't think I've even seen a named
choice outside of the test suite.
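
A minimal depth-first walk over the tree, printing prompts, might look like
this (sketch only; prompt conditions are ignored):

  def dump_tree(node, indent=0):
      while node:
          if node.prompt:
              print(indent*" " + node.prompt[0])
          if node.list:
              dump_tree(node.list, indent + 2)
          node = node.next

  dump_tree(kconf.top_node)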

Intro to expressions
====================

Expressions can be evaluated with the expr_value() function and printed with
the expr_str() function (these are used internally as well). Evaluating an
expression always yields a tristate value, where n, m, and y are represented as
0, 1, and 2, respectively.

The following table should help you figure out how expressions are represented.
A, B, C, ... are symbols (Symbol instances), NOT is the kconfiglib.NOT
constant, etc.

  Expression            Representation
  ----------            --------------
  A                     A
  "A"                   A (constant symbol)
  !A                    (NOT, A)
  A && B                (AND, A, B)
  A && B && C           (AND, A, (AND, B, C))
  A || B                (OR, A, B)
  A || (B && C && D)    (OR, A, (AND, B, (AND, C, D)))
  A = B                 (EQUAL, A, B)
  A != "foo"            (UNEQUAL, A, foo (constant symbol))
  A && B = C && D       (AND, A, (AND, (EQUAL, B, C), D))
  n                     Kconfig.n (constant symbol)
  m                     Kconfig.m (constant symbol)
  y                     Kconfig.y (constant symbol)
  "y"                   Kconfig.y (constant symbol)

Strings like "foo" in 'default "foo"' or 'depends on SYM = "foo"' are
represented as constant symbols, so the only values that appear in expressions
are symbols***. This mirrors the C implementation.

***For choice symbols, the parent Choice will appear in expressions as well,
but it's usually invisible as the value interfaces of Symbol and Choice are
identical. This mirrors the C implementation and makes different choice modes
"just work".

Manual evaluation examples:

  - The value of A && B is min(A.tri_value, B.tri_value)

  - The value of A || B is max(A.tri_value, B.tri_value)

  - The value of !A is 2 - A.tri_value

  - The value of A = B is 2 (y) if A.str_value == B.str_value, and 0 (n)
    otherwise. Note that str_value is used here instead of tri_value.

    For constant (as well as undefined) symbols, str_value matches the name of
    the symbol. This mirrors the C implementation and explains why
    'depends on SYM = "foo"' above works as expected.

n/m/y are automatically converted to the corresponding constant symbols
"n"/"m"/"y" (Kconfig.n/m/y) during parsing.

Kconfig.const_syms is a dictionary like Kconfig.syms but for constant symbols.

If a condition is missing (e.g., <cond> when the 'if <cond>' is removed from
'default A if <cond>'), it is actually Kconfig.y. The standard __str__()
functions just avoid printing 'if y' conditions to give cleaner output.
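
For example, the conditions on a symbol's 'default' properties could be
printed and evaluated like this (sketch; 'sym' is any defined symbol, and
Symbol.defaults holds (value, condition) tuples):

  from kconfiglib import expr_str, expr_value
  for value, cond in sym.defaults:
      print(expr_str(cond), "evaluates to", expr_value(cond))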

Feedback
========

Send bug reports, suggestions, and questions to ulfalizer a.t Google's email
service, or open a ticket on the GitHub page.
"""

import errno
import os
import platform
import re
import sys
# File layout:
#
# Public classes
# Public functions
# Internal functions
# Public global constants
# Internal global constants
# Line length: 79 columns
#
# Public classes
#

class Kconfig(object):
    """
    Represents a Kconfig configuration, e.g. for x86 or ARM. This is the set of
    symbols, choices, and menu nodes appearing in the configuration. Creating
    any number of Kconfig objects (including for different architectures) is
    safe. Kconfiglib doesn't keep any global state.

    The following attributes are available. They should be treated as
    read-only, and some are implemented through @property magic.

    syms:
      A dictionary with all symbols in the configuration, indexed by name. Also
      includes all symbols that are referenced in expressions but never
      defined, except for constant (quoted) symbols.

    const_syms:
      A dictionary like 'syms' for constant (quoted) symbols.

    named_choices:
      A dictionary like 'syms' for named choices (choice FOO). This is for
      completeness. I've never seen a named choice outside of the test suite.

    defined_syms:
      A list with all defined symbols, in the same order as they appear in the
      Kconfig files. Provided as a convenience.

    n/m/y:
      The predefined constant symbols n/m/y. Also available in const_syms.

    modules:
      The Symbol instance for the modules symbol. Currently hardcoded to
      MODULES, which is backwards compatible. Kconfiglib will warn if
      'option modules' is set on some other symbol. Tell me if you need proper
      'option modules' support.

      'modules' is never None. If the MODULES symbol is not explicitly defined,
      its tri_value will be 0 (n), as expected.

      A simple way to enable modules is to do 'kconf.modules.set_value(2)'
      (provided the MODULES symbol is defined and visible). Modules are
      disabled by default in the kernel Kconfig files as of writing, though
      nearly all defconfig files enable them (with 'CONFIG_MODULES=y').

    defconfig_list:
      The Symbol instance for the 'option defconfig_list' symbol, or None if no
      defconfig_list symbol exists. The defconfig filename derived from this
      symbol can be found in Kconfig.defconfig_filename.

    defconfig_filename:
      The filename given by the defconfig_list symbol. This is taken from the
      first 'default' with a satisfied condition where the specified file
      exists (can be opened for reading). If a defconfig file foo/defconfig is
      not found and $srctree was set when the Kconfig was created,
      $srctree/foo/defconfig is looked up as well.

      References to Kconfig symbols ("$FOO") in the 'default' properties of the
      defconfig_filename symbol are expanded before the file is looked up.

      'defconfig_filename' is None if either no defconfig_list symbol exists,
      or if the defconfig_list symbol has no 'default' with a satisfied
      condition that specifies a file that exists.

      Gotcha: scripts/kconfig/Makefile might pass --defconfig=<defconfig> to
      scripts/kconfig/conf when running e.g. 'make defconfig'. This option
      overrides the defconfig_list symbol, meaning defconfig_filename might not
      always match what 'make defconfig' would use.

    top_node:
      The menu node (see the MenuNode class) of the implicit top-level menu.
      Acts as the root of the menu tree.

    mainmenu_text:
      The prompt (title) of the top_node menu, with Kconfig variable references
      ("$FOO") expanded. Defaults to "Linux Kernel Configuration" (like in the
      C tools). Can be changed with the 'mainmenu' statement (see
      kconfig-language.txt).

    srctree:
      The value of the $srctree environment variable when the configuration was
      loaded, or None if $srctree wasn't set. Kconfig and .config files are
      looked up relative to $srctree if they are not found in the base path
      (unless absolute paths are used). This is used to support out-of-tree
      builds. The C tools use this environment variable in the same way.

      Changing $srctree after creating the Kconfig instance has no effect. Only
      the value when the configuration is loaded matters. This avoids surprises
      if multiple configurations are loaded with different values for $srctree.

    config_prefix:
      The value of the $CONFIG_ environment variable when the configuration was
      loaded. This is the prefix used (and expected) in .config files. Defaults
      to "CONFIG_". Used in the same way in the C tools.

      Like for srctree, only the value of $CONFIG_ when the configuration is
      loaded matters.
    """

    __slots__ = (
        "_choices",
        "_print_undef_assign",
        "_print_warnings",
        "_set_re_match",
        "_unset_re_match",
        "_warn_no_prompt",
        "config_prefix",
        "const_syms",
        "defconfig_list",
        "defined_syms",
        "m",
        "modules",
        "n",
        "named_choices",
        "srctree",
        "syms",
        "top_node",
        "y",

        # Parsing-related
        "_parsing_kconfigs",
        "_reuse_line",
        "_file",
        "_filename",
        "_linenr",
        "_filestack",
        "_line",
        "_tokens",
        "_tokens_i",
        "_has_tokens",
    )
    #
    # Public interface
    #

    def __init__(self, filename="Kconfig", warn=True):
        """
        Creates a new Kconfig object by parsing Kconfig files. Raises
        KconfigSyntaxError on syntax errors. Note that Kconfig files are not
        the same as .config files (which store configuration symbol values).

        filename (default: "Kconfig"):
          The base Kconfig file. For the Linux kernel, you'll want "Kconfig"
          from the top-level directory, as environment variables will make sure
          the right Kconfig is included from there (arch/$SRCARCH/Kconfig as of
          writing).

          If you are using Kconfiglib via 'make scriptconfig', the filename of
          the base Kconfig file will be in sys.argv[1]. It's currently
          always "Kconfig" in practice.

          The $srctree environment variable is used to look up Kconfig files if
          set. See the class documentation.

        warn (default: True):
          True if warnings related to this configuration should be printed to
          stderr. This can be changed later with
          Kconfig.enable/disable_warnings(). It is provided as a constructor
          argument since warnings might be generated during parsing.
        """
        self.srctree = os.environ.get("srctree")

        self.config_prefix = os.environ.get("CONFIG_")
        if self.config_prefix is None:
            self.config_prefix = "CONFIG_"

        # Regular expressions for parsing .config files, with the get() method
        # assigned directly as a small optimization (microscopic in this case,
        # but it's consistent with the other regexes)
        self._set_re_match = re.compile(r"{}(\w+)=(.*)"
                                        .format(self.config_prefix)).match
        self._unset_re_match = re.compile(r"# {}(\w+) is not set"
                                          .format(self.config_prefix)).match

        self._print_warnings = warn
        self._print_undef_assign = False

        self.syms = {}
        self.const_syms = {}
        self.defined_syms = []
        self.named_choices = {}
        # Used for quickly invalidating all choices
        self._choices = []

        for nmy in "n", "m", "y":
            sym = Symbol()
            sym.kconfig = self
            sym.name = nmy
            sym.is_constant = True
            sym.orig_type = TRISTATE
            sym._cached_tri_val = STR_TO_TRI[nmy]

            self.const_syms[nmy] = sym

        self.n = self.const_syms["n"]
        self.m = self.const_syms["m"]
        self.y = self.const_syms["y"]

        # Make n/m/y well-formed symbols
        for nmy in "n", "m", "y":
            sym = self.const_syms[nmy]
            sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n

        # This is used to determine whether previously unseen symbols should be
        # registered. They shouldn't be if we parse expressions after parsing,
        # as part of Kconfig.eval_string().
        self._parsing_kconfigs = True

        self.modules = self._lookup_sym("MODULES")
        self.defconfig_list = None

        # The only predefined symbol besides n/m/y. DEFCONFIG_LIST uses this as
        # of writing.
        uname_sym = self._lookup_const_sym("UNAME_RELEASE")
        uname_sym.orig_type = STRING
        # env_var doubles as the SYMBOL_AUTO flag from the C implementation, so
        # just set it to something. The naming breaks a bit here.
        uname_sym.env_var = "<uname release>"
        uname_sym.defaults.append(
            (self._lookup_const_sym(platform.uname()[2]), self.y))
        self.syms["UNAME_RELEASE"] = uname_sym

        self.top_node = MenuNode()
        self.top_node.kconfig = self
        self.top_node.item = MENU
        self.top_node.visibility = self.y
        self.top_node.prompt = ("Linux Kernel Configuration", self.y)
        self.top_node.parent = None
        self.top_node.dep = self.y
        self.top_node.filename = filename
        self.top_node.linenr = 1

        # Parse the Kconfig files

        # These implement a single line of "unget" for the parser
        self._reuse_line = False
        self._has_tokens = False

        # Keeps track of the location in the parent Kconfig files. Kconfig
        # files usually source other Kconfig files.
        self._filestack = []

        # The current parsing location
        self._filename = filename
        self._linenr = 0

        self._file = self._open(filename)

        self._parse_block(None,           # end_token
                          self.top_node,  # parent
                          self.y,         # visible_if_deps
                          self.top_node)  # prev_node
        self.top_node.list = self.top_node.next
        self.top_node.next = None

        self._parsing_kconfigs = False

        # Do various post-processing of the menu tree
        _finalize_tree(self.top_node)

        # Build Symbol._dependents for all symbols
        self._build_dep()

        self._warn_no_prompt = True

    @property
    def mainmenu_text(self):
        """
        See the class documentation.
        """
        return self._expand_syms(self.top_node.prompt[0])

    @property
    def defconfig_filename(self):
        """
        See the class documentation.
        """
        if not self.defconfig_list:
            return None

        for filename, cond in self.defconfig_list.defaults:
            if expr_value(cond):
                try:
                    with self._open(self._expand_syms(filename.str_value)) as f:
                        return f.name
                except IOError:
                    continue

        return None
    def load_config(self, filename, replace=True):
        """
        Loads symbol values from a file in the .config format. Equivalent to
        calling Symbol.set_value() to set each of the values.

        "# CONFIG_FOO is not set" within a .config file sets the user value of
        FOO to n. The C tools work the same way.

        filename:
          The file to load. Respects $srctree if set (see the class
          documentation).

        replace (default: True):
          True if all existing user values should be cleared before loading the
          .config.
        """
        # Disable the warning about assigning to symbols without prompts. This
        # is normal and expected within a .config file.
        self._warn_no_prompt = False

        # This stub only exists to make sure _warn_no_prompt gets reenabled
        try:
            self._load_config(filename, replace)
        finally:
            self._warn_no_prompt = True

    def _load_config(self, filename, replace):
        with self._open(filename) as f:
            if replace:
                # If we're replacing the configuration, keep track of which
                # symbols and choices got set so that we can unset the rest
                # later. This avoids invalidating everything and is faster.
                # Another benefit is that invalidation must be rock solid for
                # it to work, making it a good test.
                for sym in self.defined_syms:
                    sym._was_set = False

                for choice in self._choices:
                    choice._was_set = False

            # Small optimizations
            set_re_match = self._set_re_match
            unset_re_match = self._unset_re_match
            syms = self.syms

            for linenr, line in enumerate(f, 1):
                # The C tools ignore trailing whitespace
                line = line.rstrip()

                set_match = set_re_match(line)
                if set_match:
                    name, val = set_match.groups()
                    if name not in syms:
                        self._warn_undef_assign_load(name, val, filename,
                                                     linenr)
                        continue

                    sym = syms[name]
                    if not sym.nodes:
                        self._warn_undef_assign_load(name, val, filename,
                                                     linenr)
                        continue

                    if sym.orig_type in (BOOL, TRISTATE):
                        if val == "":
                            val = "n"  # C implementation allows 'blank' for 'no'
                        # The C implementation only checks the first character
                        # to the right of '=', for whatever reason
                        if not ((sym.orig_type == BOOL and
                                 val.startswith(("n", "y"))) or
                                (sym.orig_type == TRISTATE and
                                 val.startswith(("n", "m", "y")))):
                            if val != "":  # workaround for old IDF conf behaviour
                                self._warn("'{}' is not a valid value for the {} "
                                           "symbol {}. Assignment ignored."
                                           .format(val, TYPE_TO_STR[sym.orig_type],
                                                   sym.name))
                            continue

                        # We represent tristate values as 0, 1, 2
                        val = STR_TO_TRI[val[0]]

                        if sym.choice and val:
                            # During .config loading, we infer the mode of the
                            # choice from the kind of values that are assigned
                            # to the choice symbols
                            prev_mode = sym.choice.user_value
                            if prev_mode is not None and prev_mode != val:
                                self._warn("both m and y assigned to symbols "
                                           "within the same choice",
                                           filename, linenr)

                            # Set the choice's mode
                            sym.choice.set_value(val)

                    elif sym.orig_type == STRING:
                        string_match = _conf_string_re_match(val)
                        if not string_match:
                            self._warn("Malformed string literal in "
                                       "assignment to {}. Assignment ignored."
                                       .format(sym.name),
                                       filename, linenr)
                            continue

                        val = unescape(string_match.group(1))

                else:
                    unset_match = unset_re_match(line)
                    if not unset_match:
                        continue

                    name = unset_match.group(1)
                    if name not in syms:
                        self._warn_undef_assign_load(name, "n", filename,
                                                     linenr)
                        continue

                    sym = syms[name]
                    if sym.orig_type not in (BOOL, TRISTATE):
                        continue

                    val = 0

                # Done parsing the assignment. Set the value.

                if sym._was_set:
                    # Use strings for tristate values in the warning
                    if sym.orig_type in (BOOL, TRISTATE):
                        display_val = TRI_TO_STR[val]
                        display_user_val = TRI_TO_STR[sym.user_value]
                    else:
                        display_val = val
                        display_user_val = sym.user_value

                    self._warn('{} set more than once. Old value: "{}", new '
                               'value: "{}".'
                               .format(name, display_user_val, display_val),
                               filename, linenr)

                sym.set_value(val)

        if replace:
            # If we're replacing the configuration, unset the symbols that
            # didn't get set
            for sym in self.defined_syms:
                if not sym._was_set:
                    sym.unset_value()

            for choice in self._choices:
                if not choice._was_set:
                    choice.unset_value()
def write_autoconf(self, filename,
header="/* Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib) */\n"):
r"""
Writes out symbol values as a C header file, matching the format used
by include/generated/autoconf.h in the kernel (though possibly with a
different ordering of the #defines, as the order in the C
implementation depends on the hash table implementation as of writing).
filename:
Self-explanatory.
header (default: "/* Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib) */\n"):
Text that will be inserted verbatim at the beginning of the file. You
would usually want it enclosed in '/* */' to make it a C comment,
and include a final terminating newline.
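A minimal usage sketch (assumes 'kconf' is a Kconfig instance; the
output path is a hypothetical example):

  kconf.write_autoconf("build/autoconf.h")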
"""
with open(filename, "w") as f:
# Small optimizations
write = f.write
config_prefix = self.config_prefix
write(header)
def write_node(node):
sym = node.item
if not isinstance(sym, Symbol):
return
# Note: _write_to_conf is determined when the value is
# calculated. This is a hidden function call due to
# property magic.
val = sym.str_value
if sym._write_to_conf:
orig_type = sym.orig_type
if orig_type in (BOOL, TRISTATE):
if val != "n":
write("#define {}{}{} 1\n"
.format(config_prefix, sym.name,
"_MODULE" if val == "m" else ""))
elif orig_type == STRING:
write('#define {}{} "{}"\n'
.format(config_prefix, sym.name,
escape(val)))
elif orig_type in (INT, HEX):
if orig_type == HEX and \
not val.startswith(("0x", "0X")):
val = "0x" + val
write("#define {}{} {}\n"
.format(config_prefix, sym.name, val))
else:
_internal_error("Internal error while creating C "
'header: unknown type "{}".'
.format(sym.orig_type))
self.walk_menu(write_node)
def write_config(self, filename,
header="# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
r"""
Writes out symbol values in the .config format.
filename:
Self-explanatory.
header (default: "# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
Text that will be inserted verbatim at the beginning of the file. You
would usually want each line to start with '#' to make it a comment,
and include a final terminating newline.
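A minimal usage sketch (assumes 'kconf' is a Kconfig instance; the path
and header text are hypothetical examples):

  kconf.write_config(".config.new", header="# My project\n")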
"""
with open(filename, "w") as f:
# Small optimization
write = f.write
write(header)
def write_node(node):
item = node.item
if isinstance(item, Symbol) and item.env_var is None:
config_string = item.config_string
if config_string:
write(config_string)
elif expr_value(node.dep) and \
((item == MENU and expr_value(node.visibility)) or
item == COMMENT):
write("\n#\n# {}\n#\n".format(node.prompt[0]))
self.walk_menu(write_node, True)
def walk_menu(self, callback, skip_duplicates=False):
"""
Walk the entire menu in order, calling callback(node)
for each menu node.
Used to implement write_config() & write_autoconf(), but can be
used to implement different types of custom processing as well.
callback:
Function which is called once for each node in the config tree.
Takes only one argument, the node.
skip_duplicates (default: False):
If set to True, for each item in the menu the callback will
only be called the first time it is encountered in the menu.
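A hedged sketch of a custom walker (assumes 'kconf' is a Kconfig
instance; the print format is just an illustration):

  def print_sym_locations(node):
      if isinstance(node.item, Symbol):
          print("{}:{}: {}".format(node.filename, node.linenr,
                                   node.item.name))

  kconf.walk_menu(print_sym_locations)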
"""
node = self.top_node.list
if not node:
return # Empty configuration
seen_items = set()
while True:
if not (skip_duplicates and node.item in seen_items):
callback(node)
seen_items.add(node.item)
if node.list:
node = node.list
elif node.next:
node = node.next
else:
while node.parent:
node = node.parent
if node.next:
node = node.next
break
else:
return
def eval_string(self, s):
"""
Returns the tristate value of the expression 's', represented as 0, 1,
and 2 for n, m, and y, respectively. Raises KconfigSyntaxError if
syntax errors are detected in 's'. Warns if undefined symbols are
referenced.
As an example, if FOO and BAR are tristate symbols at least one of
which has the value y, then config.eval_string("y && (FOO || BAR)")
returns 2 (y).
To get the string value of non-bool/tristate symbols, use
Symbol.str_value. eval_string() always returns a tristate value, and
all non-bool/tristate symbols have the tristate value 0 (n).
The expression parsing is consistent with how parsing works for
conditional ('if ...') expressions in the configuration, and matches
the C implementation. m is rewritten to 'm && MODULES', so
eval_string("m") will return 0 (n) unless modules are enabled.
"""
# The parser is optimized to be fast when parsing Kconfig files (where
# an expression can never appear at the beginning of a line). We have
# to monkey-patch things a bit here to reuse it.
self._filename = None
self._line = "if " + s
self._tokenize()
# Remove the "if " to avoid giving confusing error messages
self._line = s
# Remove the _T_IF token
del self._tokens[0]
return expr_value(self._parse_expr(True)) # transform_m
def unset_values(self):
"""
Resets the user values of all symbols, as if Kconfig.load_config() or
Symbol.set_value() had never been called.
"""
self._warn_no_prompt = False
try:
# set_value() already rejects undefined symbols, and they don't
# need to be invalidated (because their value never changes), so we
# can just iterate over defined symbols
for sym in self.defined_syms:
sym.unset_value()
for choice in self._choices:
choice.unset_value()
finally:
self._warn_no_prompt = True
def enable_warnings(self):
"""
See Kconfig.__init__().
"""
self._print_warnings = True
def disable_warnings(self):
"""
See Kconfig.__init__().
"""
self._print_warnings = False
def enable_undef_warnings(self):
"""
Enables warnings for assignments to undefined symbols. The warnings are
printed to stderr. Disabled by default, since they tend to be spammy for
kernel configurations (and mostly suggest cleanups).
"""
self._print_undef_assign = True
def disable_undef_warnings(self):
"""
See enable_undef_warnings().
"""
self._print_undef_assign = False
def __repr__(self):
"""
Returns a string with information about the Kconfig object when it is
evaluated on e.g. the interactive Python prompt.
"""
return "<{}>".format(", ".join((
"configuration with {} symbols".format(len(self.syms)),
'main menu prompt "{}"'.format(self.mainmenu_text),
"srctree not set" if self.srctree is None else
'srctree "{}"'.format(self.srctree),
'config symbol prefix "{}"'.format(self.config_prefix),
"warnings " + ("enabled" if self._print_warnings else "disabled"),
"undef. symbol assignment warnings " +
("enabled" if self._print_undef_assign else "disabled"),
)))
#
# Private methods
#
#
# File reading
#
def _open(self, filename):
"""
First tries to open 'filename', then '$srctree/filename' if $srctree
was set when the configuration was loaded.
"""
try:
return open(filename)
except IOError as e:
if not os.path.isabs(filename) and self.srctree is not None:
filename = os.path.join(self.srctree, filename)
try:
return open(filename)
except IOError as e2:
# This is needed for Python 3, because e2 is deleted after
# the try block:
#
# https://docs.python.org/3/reference/compound_stmts.html#the-try-statement
e = e2
raise IOError(
"Could not open '{}' ({}: {}). Perhaps the $srctree "
"environment variable (which was {}) is set incorrectly. Note "
"that the current value of $srctree is saved when the Kconfig "
"instance is created (for consistency and to cleanly "
"separate instances)."
.format(filename, errno.errorcode[e.errno], e.strerror,
"unset" if self.srctree is None else
'"{}"'.format(self.srctree)))
def _enter_file(self, filename):
"""
Jumps to the beginning of a sourced Kconfig file, saving the previous
position and file object.
"""
self._filestack.append((self._file, self._filename, self._linenr))
try:
self._file = self._open(filename)
except IOError as e:
# Extend the error message a bit in this case
raise IOError(
"{}:{}: {} Also note that e.g. $FOO in a 'source' "
"statement does not refer to the environment "
"variable FOO, but rather to the Kconfig Symbol FOO "
"(which would commonly have 'option env=\"FOO\"' in "
"its definition)."
.format(self._filename, self._linenr, str(e)))
self._filename = filename
self._linenr = 0
def _leave_file(self):
"""
Returns from a Kconfig file to the file that sourced it.
"""
self._file.close()
self._file, self._filename, self._linenr = self._filestack.pop()
def _next_line(self):
"""
Fetches and tokenizes the next line from the current Kconfig file.
Returns False at EOF and True otherwise.
"""
# This provides a single line of "unget" if _reuse_line is set to True
if not self._reuse_line:
self._line = self._file.readline()
self._linenr += 1
self._reuse_line = False
# Handle line joining
while self._line.endswith("\\\n"):
self._line = self._line[:-2] + self._file.readline()
self._linenr += 1
if not self._line:
return False
self._tokenize()
return True
def _next_help_line(self):
"""
Used for help texts, where lines are not tokenized and no line joining
is done.
"""
self._line = self._file.readline()
self._linenr += 1
return self._line
#
# Tokenization
#
def _lookup_sym(self, name):
"""
Fetches the symbol 'name' from the symbol table, creating and
registering it if it does not exist. If '_parsing_kconfigs' is False,
it means we're in eval_string(), and new symbols won't be registered.
"""
if name in self.syms:
return self.syms[name]
sym = Symbol()
sym.kconfig = self
sym.name = name
sym.is_constant = False
sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n
if self._parsing_kconfigs:
self.syms[name] = sym
else:
self._warn("no symbol {} in configuration".format(name))
return sym
def _lookup_const_sym(self, name):
"""
Like _lookup_sym(), for constant (quoted) symbols
"""
if name in self.const_syms:
return self.const_syms[name]
sym = Symbol()
sym.kconfig = self
sym.name = name
sym.is_constant = True
sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n
if self._parsing_kconfigs:
self.const_syms[name] = sym
return sym
def _tokenize(self):
"""
Parses Kconfig._line, putting the tokens in Kconfig._tokens. Registers
any new symbols encountered with _lookup(_const)_sym().
Tries to be reasonably speedy by processing chunks of text via regexes
and string operations where possible. This is the biggest hotspot
during parsing.
"""
s = self._line
# Tricky implementation detail: While parsing a token, 'token' refers
# to the previous token. See _STRING_LEX for why this is needed.
# See comment at _initial_token_re_match definition
initial_token_match = _initial_token_re_match(s)
if not initial_token_match:
self._tokens = (None,)
self._tokens_i = -1
return
keyword = _get_keyword(initial_token_match.group(1))
if keyword == _T_HELP:
# Avoid junk after "help", e.g. "---", being registered as a
# symbol
self._tokens = (_T_HELP, None)
self._tokens_i = -1
return
if keyword is None:
self._parse_error("expected keyword as first token")
token = keyword
self._tokens = [keyword]
# The current index in the string being tokenized
i = initial_token_match.end()
# Main tokenization loop (for tokens past the first one)
while i < len(s):
# Test for an identifier/keyword first. This is the most common
# case.
id_keyword_match = _id_keyword_re_match(s, i)
if id_keyword_match:
# We have an identifier or keyword
# Jump past it
i = id_keyword_match.end()
# Check what it is. _lookup_sym() will take care of allocating
# new symbols for us the first time we see them. Note that
# 'token' still refers to the previous token.
name = id_keyword_match.group(1)
keyword = _get_keyword(name)
if keyword is not None:
# It's a keyword
token = keyword
elif token not in _STRING_LEX:
# It's a non-const symbol...
if name in ("n", "m", "y"):
# ...except we translate n, m, and y into the
# corresponding constant symbols, like the C
# implementation
token = self.const_syms[name]
else:
token = self._lookup_sym(name)
else:
# It's a case of missing quotes. For example, the
# following is accepted:
#
# menu unquoted_title
#
# config A
# tristate unquoted_prompt
#
# endmenu
token = name
else:
# Not keyword/non-const symbol
# Note: _id_keyword_match and _initial_token_match strip
# trailing whitespace, making it safe to assume s[i] is the
# start of a token here. We manually strip trailing whitespace
# below as well.
#
# An old version stripped whitespace in this spot instead, but
# that leads to some redundancy and would cause
# _id_keyword_match to be tried against just "\n" fairly often
# (because file.readlines() keeps newlines).
c = s[i]
i += 1
if c in "\"'":
# String literal/constant symbol
if "\\" not in s:
# Fast path: If the line contains no backslashes, we
# can just find the matching quote.
end = s.find(c, i)
if end == -1:
self._parse_error("unterminated string")
val = s[i:end]
i = end + 1
else:
# Slow path for lines with backslashes (very rare,
# performance irrelevant)
quote = c
val = ""
while 1:
if i >= len(s):
self._parse_error("unterminated string")
c = s[i]
if c == quote:
break
if c == "\\":
if i + 1 >= len(s):
self._parse_error("unterminated string")
val += s[i + 1]
i += 2
else:
val += c
i += 1
i += 1
# This is the only place where we don't survive with a
# single token of lookback: 'option env="FOO"' does not
# refer to a constant symbol named "FOO".
token = val \
if token in _STRING_LEX or \
self._tokens[0] == _T_OPTION else \
self._lookup_const_sym(val)
elif c == "&":
# Invalid characters are ignored (backwards-compatible)
if i >= len(s) or s[i] != "&":
continue
token = _T_AND
i += 1
elif c == "|":
# Invalid characters are ignored (backwards-compatible)
if i >= len(s) or s[i] != "|":
continue
token = _T_OR
i += 1
elif c == "!":
if i < len(s) and s[i] == "=":
token = _T_UNEQUAL
i += 1
else:
token = _T_NOT
elif c == "=":
token = _T_EQUAL
elif c == "(":
token = _T_OPEN_PAREN
elif c == ")":
token = _T_CLOSE_PAREN
elif c == "#":
break
# Very rare
elif c == "<":
if i < len(s) and s[i] == "=":
token = _T_LESS_EQUAL
i += 1
else:
token = _T_LESS
# Very rare
elif c == ">":
if i < len(s) and s[i] == "=":
token = _T_GREATER_EQUAL
i += 1
else:
token = _T_GREATER
else:
# Invalid characters are ignored (backwards-compatible)
continue
# Skip trailing whitespace
while i < len(s) and s[i].isspace():
i += 1
self._tokens.append(token)
# None-terminating the token list makes the token fetching functions
# simpler/faster
self._tokens.append(None)
self._tokens_i = -1
def _next_token(self):
self._tokens_i += 1
return self._tokens[self._tokens_i]
def _peek_token(self):
return self._tokens[self._tokens_i + 1]
def _check_token(self, token):
"""
If the next token is 'token', removes it and returns True.
"""
if self._tokens[self._tokens_i + 1] == token:
self._tokens_i += 1
return True
return False
#
# Parsing
#
def _make_and(self, e1, e2):
"""
Constructs an AND (&&) expression. Performs trivial simplification.
"""
if e1 is self.y:
return e2
if e2 is self.y:
return e1
if e1 is self.n or e2 is self.n:
return self.n
return (AND, e1, e2)
def _make_or(self, e1, e2):
"""
Constructs an OR (||) expression. Performs trivial simplification.
"""
if e1 is self.n:
return e2
if e2 is self.n:
return e1
if e1 is self.y or e2 is self.y:
return self.y
return (OR, e1, e2)
def _parse_block(self, end_token, parent, visible_if_deps, prev_node):
"""
Parses a block, which is the contents of either a file or an if, menu,
or choice statement.
end_token:
The token that ends the block, e.g. _T_ENDIF ("endif") for ifs. None
for files.
parent:
The parent menu node, corresponding to e.g. a menu or Choice. Can
also be a Symbol, due to automatic submenu creation from
dependencies.
visible_if_deps:
'visible if' dependencies from enclosing menus. Propagated to Symbol
and Choice prompts.
prev_node:
The previous menu node. New nodes will be added after this one (by
modifying their 'next' pointer).
prev_node is reused to parse a list of child menu nodes (for a menu
or Choice): After parsing the children, the 'next' pointer is
assigned to the 'list' pointer to "tilt up" the children above the
node.
Returns the final menu node in the block (or prev_node if the block is
empty). This allows chaining.
"""
# We might already have tokens from parsing a line to check if it's a
# property and discovering it isn't. self._has_tokens functions as a
# kind of "unget".
while self._has_tokens or self._next_line():
self._has_tokens = False
t0 = self._next_token()
if t0 is None:
continue
if t0 in (_T_CONFIG, _T_MENUCONFIG):
# The tokenizer allocates Symbol objects for us
sym = self._next_token()
node = MenuNode()
node.kconfig = self
node.item = sym
node.help = node.list = None
node.parent = parent
node.filename = self._filename
node.linenr = self._linenr
node.is_menuconfig = (t0 == _T_MENUCONFIG)
self._parse_properties(node, visible_if_deps)
sym.nodes.append(node)
self.defined_syms.append(sym)
# Tricky Python semantics: This assigns prev_node.next before
# prev_node
prev_node.next = prev_node = node
elif t0 == _T_SOURCE:
values = _wordexp_expand(self._next_token())
for sourced_file in values:
self._enter_file(sourced_file)
prev_node = self._parse_block(None, # end_token
parent,
visible_if_deps,
prev_node)
self._leave_file()
elif t0 == end_token:
# We have reached the end of the block. Terminate the final
# node and return it.
prev_node.next = None
return prev_node
elif t0 == _T_IF:
node = MenuNode()
node.item = node.prompt = None
node.parent = parent
node.filename = self._filename
node.linenr = self._linenr
# See similar code in _parse_properties()
if isinstance(parent.item, Choice):
parent_dep = parent.item
else:
parent_dep = parent.dep
node.dep = self._make_and(parent_dep, self._parse_expr(True))
self._parse_block(_T_ENDIF,
node, # parent
visible_if_deps,
node) # prev_node
node.list = node.next
prev_node.next = prev_node = node
elif t0 == _T_MENU:
node = MenuNode()
node.kconfig = self
node.item = MENU
node.visibility = self.y
node.parent = parent
node.filename = self._filename
node.linenr = self._linenr
prompt = self._next_token()
self._parse_properties(node, visible_if_deps)
node.prompt = (prompt, node.dep)
self._parse_block(_T_ENDMENU,
node, # parent
self._make_and(visible_if_deps,
node.visibility),
node) # prev_node
node.list = node.next
prev_node.next = prev_node = node
elif t0 == _T_COMMENT:
node = MenuNode()
node.kconfig = self
node.item = COMMENT
node.list = None
node.parent = parent
node.filename = self._filename
node.linenr = self._linenr
prompt = self._next_token()
self._parse_properties(node, visible_if_deps)
node.prompt = (prompt, node.dep)
prev_node.next = prev_node = node
elif t0 == _T_CHOICE:
name = self._next_token()
if name is None:
choice = Choice()
self._choices.append(choice)
else:
# Named choice
choice = self.named_choices.get(name)
if not choice:
choice = Choice()
self._choices.append(choice)
choice.name = name
self.named_choices[name] = choice
choice.kconfig = self
node = MenuNode()
node.kconfig = self
node.item = choice
node.help = None
node.parent = parent
node.filename = self._filename
node.linenr = self._linenr
self._parse_properties(node, visible_if_deps)
self._parse_block(_T_ENDCHOICE,
node, # parent
visible_if_deps,
node) # prev_node
node.list = node.next
choice.nodes.append(node)
prev_node.next = prev_node = node
elif t0 == _T_MAINMENU:
self.top_node.prompt = (self._next_token(), self.y)
self.top_node.filename = self._filename
self.top_node.linenr = self._linenr
else:
self._parse_error("unrecognized construct")
# End of file reached. Terminate the final node and return it.
if end_token is not None:
raise KconfigSyntaxError("Unexpected end of file " +
self._filename)
prev_node.next = None
return prev_node
def _parse_cond(self):
"""
Parses an optional 'if <expr>' construct and returns the parsed <expr>,
or self.y if the next token is not _T_IF
"""
return self._parse_expr(True) if self._check_token(_T_IF) else self.y
def _parse_properties(self, node, visible_if_deps):
"""
Parses properties for symbols, menus, choices, and comments. Also takes
care of propagating dependencies from the menu node to the properties
of the item (this mirrors the C tools, though they do it after
parsing).
node:
The menu node we're parsing properties on. Prompt, help text,
'depends on', and 'visible if' properties apply to the Menu node,
while the others apply to the contained item.
visible_if_deps:
'visible if' dependencies from enclosing menus. Propagated to Symbol
and Choice prompts.
"""
# New properties encountered at this location. A local 'depends on'
# only applies to these, in case a symbol is defined in multiple
# locations.
prompt = None
defaults = []
selects = []
implies = []
ranges = []
# Menu node dependencies from 'depends on'. Will get propagated to the
# properties above.
node.dep = self.y
while self._next_line():
t0 = self._next_token()
if t0 is None:
continue
if t0 in _TYPE_TOKENS:
node.item.orig_type = _TOKEN_TO_TYPE[t0]
if self._peek_token() is not None:
prompt = (self._next_token(), self._parse_cond())
elif t0 == _T_DEPENDS:
if not self._check_token(_T_ON):
self._parse_error('expected "on" after "depends"')
node.dep = self._make_and(node.dep, self._parse_expr(True))
elif t0 == _T_HELP:
# Find first non-blank (not all-space) line and get its
# indentation
while 1:
line = self._next_help_line()
if not line or not line.isspace():
break
if not line:
node.help = ""
break
indent = _indentation(line)
if indent == 0:
# If the first non-empty line has zero indent, there is no
# help text
node.help = ""
self._reuse_line = True # "Unget" the line
break
# The help text goes on till the first non-empty line with less
# indent
help_lines = [_deindent(line, indent).rstrip()]
while 1:
line = self._next_help_line()
if not line or \
(not line.isspace() and _indentation(line) < indent):
node.help = "\n".join(help_lines).rstrip() + "\n"
break
help_lines.append(_deindent(line, indent).rstrip())
if not line:
break
self._reuse_line = True # "Unget" the line
elif t0 == _T_SELECT:
if not isinstance(node.item, Symbol):
self._parse_error("only symbols can select")
selects.append((self._next_token(), self._parse_cond()))
elif t0 == _T_IMPLY:
if not isinstance(node.item, Symbol):
self._parse_error("only symbols can imply")
implies.append((self._next_token(), self._parse_cond()))
elif t0 == _T_DEFAULT:
defaults.append((self._parse_expr(False), self._parse_cond()))
elif t0 in (_T_DEF_BOOL, _T_DEF_TRISTATE):
node.item.orig_type = _TOKEN_TO_TYPE[t0]
defaults.append((self._parse_expr(False), self._parse_cond()))
elif t0 == _T_PROMPT:
# 'prompt' properties override each other within a single
# definition of a symbol, but additional prompts can be added
# by defining the symbol multiple times
prompt = (self._next_token(), self._parse_cond())
elif t0 == _T_RANGE:
ranges.append((self._next_token(),
self._next_token(),
self._parse_cond()))
elif t0 == _T_OPTION:
if self._check_token(_T_ENV):
if not self._check_token(_T_EQUAL):
self._parse_error("expected '=' after 'env'")
env_var = self._next_token()
node.item.env_var = env_var
if env_var not in os.environ:
self._warn("'option env=\"{0}\"' on symbol {1} has "
"no effect, because the environment "
"variable {0} is not set"
.format(env_var, node.item.name),
self._filename, self._linenr)
else:
defaults.append(
(self._lookup_const_sym(os.environ[env_var]),
self.y))
elif self._check_token(_T_DEFCONFIG_LIST):
if not self.defconfig_list:
self.defconfig_list = node.item
else:
self._warn("'option defconfig_list' set on multiple "
"symbols ({0} and {1}). Only {0} will be "
"used.".format(self.defconfig_list.name,
node.item.name),
self._filename, self._linenr)
elif self._check_token(_T_MODULES):
# To reduce warning spam, only warn if 'option modules' is
# set on some symbol that isn't MODULES, which should be
# safe. I haven't run into any projects that make use of
# modules besides the kernel yet, and there it's likely to
# keep being called "MODULES".
if node.item is not self.modules:
self._warn("the 'modules' option is not supported. "
"Let me know if this is a problem for you, "
"as it wouldn't be that hard to implement. "
"Note that modules are supported -- "
"Kconfiglib just assumes the symbol name "
"MODULES, like older versions of the C "
"implementation did when 'option modules' "
"wasn't used.",
self._filename, self._linenr)
elif self._check_token(_T_ALLNOCONFIG_Y):
if not isinstance(node.item, Symbol):
self._parse_error("the 'allnoconfig_y' option is only "
"valid for symbols")
node.item.is_allnoconfig_y = True
else:
self._parse_error("unrecognized option")
elif t0 == _T_VISIBLE:
if not self._check_token(_T_IF):
self._parse_error('expected "if" after "visible"')
node.visibility = \
self._make_and(node.visibility, self._parse_expr(True))
elif t0 == _T_OPTIONAL:
if not isinstance(node.item, Choice):
self._parse_error('"optional" is only valid for choices')
node.item.is_optional = True
else:
self._tokens_i = -1
# Reuse the tokens for the non-property line later
self._has_tokens = True
break
# Done parsing properties. Now add the new
# prompts/defaults/selects/implies/ranges properties, with dependencies
# from node.dep propagated.
# First propagate parent dependencies to node.dep
# If the parent node holds a Choice, we use the Choice itself as the
# parent dependency. This matches the C implementation, and makes sense
# as the value (mode) of the choice limits the visibility of the
# contained choice symbols. Due to the similar interface, Choice works
# as a drop-in replacement for Symbol here.
if isinstance(node.parent.item, Choice):
node.dep = self._make_and(node.dep, node.parent.item)
else:
node.dep = self._make_and(node.dep, node.parent.dep)
if isinstance(node.item, (Symbol, Choice)):
if isinstance(node.item, Symbol):
# See the class documentation
node.item.direct_dep = \
self._make_or(node.item.direct_dep, node.dep)
# Set the prompt, with dependencies propagated
if prompt:
node.prompt = (prompt[0],
self._make_and(self._make_and(prompt[1],
node.dep),
visible_if_deps))
else:
node.prompt = None
# Add the new defaults, with dependencies propagated
for val_expr, cond in defaults:
node.item.defaults.append(
(val_expr, self._make_and(cond, node.dep)))
# Add the new ranges, with dependencies propagated
for low, high, cond in ranges:
node.item.ranges.append(
(low, high, self._make_and(cond, node.dep)))
# Handle selects
for target, cond in selects:
# Only stored for inspection. Not used during evaluation.
node.item.selects.append(
(target, self._make_and(cond, node.dep)))
# Modify the dependencies of the selected symbol
target.rev_dep = \
self._make_or(target.rev_dep,
self._make_and(node.item,
self._make_and(cond,
node.dep)))
# Handle implies
for target, cond in implies:
# Only stored for inspection. Not used during evaluation.
node.item.implies.append(
(target, self._make_and(cond, node.dep)))
# Modify the dependencies of the implied symbol
target.weak_rev_dep = \
self._make_or(target.weak_rev_dep,
self._make_and(node.item,
self._make_and(cond,
node.dep)))
def _parse_expr(self, transform_m):
"""
Parses an expression from the tokens in Kconfig._tokens using a simple
top-down approach. See the module docs for the expression format.
transform_m:
True if m should be rewritten to m && MODULES. See the
Kconfig.eval_string() documentation.
"""
# Grammar:
#
# expr: and_expr ['||' expr]
# and_expr: factor ['&&' and_expr]
# factor: <symbol> ['='/'!='/'<'/... <symbol>]
# '!' factor
# '(' expr ')'
#
# It helps to think of the 'expr: and_expr' case as a single-operand OR
# (no ||), and of the 'and_expr: factor' case as a single-operand AND
# (no &&). Parsing code is always a bit tricky.
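# Illustrative example: "A || B && C" parses as (OR, A, (AND, B, C)),
# since and_expr is parsed before '||' is checked, giving '&&' higher
# precedence than '||' (as in C).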
# Mind dump: parse_factor() and two nested loops for OR and AND would
# work as well. The straightforward implementation there gives a
# (op, (op, (op, A, B), C), D) parse for A op B op C op D. Representing
# expressions as (op, [list of operands]) instead goes nicely with that
# version, but is wasteful for short expressions and complicates
# expression evaluation and other code that works on expressions (more
# complicated code likely offsets any performance gain from less
# recursion too). If we also try to optimize the list representation by
# merging lists when possible (e.g. when ANDing two AND expressions),
# we end up allocating a ton of lists instead of reusing expressions,
# which is bad.
and_expr = self._parse_and_expr(transform_m)
# Return 'and_expr' directly if we have a "single-operand" OR.
# Otherwise, parse the expression on the right and make an OR node.
# This turns A || B || C || D into (OR, A, (OR, B, (OR, C, D))).
return and_expr \
if not self._check_token(_T_OR) else \
(OR, and_expr, self._parse_expr(transform_m))
def _parse_and_expr(self, transform_m):
factor = self._parse_factor(transform_m)
# Return 'factor' directly if we have a "single-operand" AND.
# Otherwise, parse the right operand and make an AND node. This turns
# A && B && C && D into (AND, A, (AND, B, (AND, C, D))).
return factor \
if not self._check_token(_T_AND) else \
(AND, factor, self._parse_and_expr(transform_m))
def _parse_factor(self, transform_m):
token = self._next_token()
if isinstance(token, Symbol):
# Plain symbol or relation
next_token = self._peek_token()
if next_token not in _TOKEN_TO_REL:
# Plain symbol
# For conditional expressions ('depends on <expr>',
# '... if <expr>', etc.), m is rewritten to m && MODULES.
if transform_m and token is self.m:
return (AND, self.m, self.modules)
return token
# Relation
return (_TOKEN_TO_REL[self._next_token()], token,
self._next_token())
if token == _T_NOT:
return (NOT, self._parse_factor(transform_m))
if token == _T_OPEN_PAREN:
expr_parse = self._parse_expr(transform_m)
if not self._check_token(_T_CLOSE_PAREN):
self._parse_error("missing end parenthesis")
return expr_parse
self._parse_error("malformed expression")
#
# Caching and invalidation
#
def _build_dep(self):
"""
Populates the Symbol/Choice._dependents sets, which contain all other
items (symbols and choices) that immediately depend on the item in the
sense that changing the value of the item might affect the value of the
dependent items. This is used for caching/invalidation.
The calculated sets might be larger than necessary as we don't do any
complex analysis of the expressions.
"""
# Only calculate _dependents for defined symbols. Constant and
# undefined symbols could theoretically be selected/implied, but it
# wouldn't change their value, so it's not a true dependency.
for sym in self.defined_syms:
# Symbols depend on the following:
# The prompt conditions
for node in sym.nodes:
if node.prompt:
_make_depend_on(sym, node.prompt[1])
# The default values and their conditions
for value, cond in sym.defaults:
_make_depend_on(sym, value)
_make_depend_on(sym, cond)
# The reverse and weak reverse dependencies
_make_depend_on(sym, sym.rev_dep)
_make_depend_on(sym, sym.weak_rev_dep)
# The ranges along with their conditions
for low, high, cond in sym.ranges:
_make_depend_on(sym, low)
_make_depend_on(sym, high)
_make_depend_on(sym, cond)
# The direct dependencies. This is usually redundant, as the direct
# dependencies get propagated to properties, but it's needed to get
# invalidation solid for 'imply', which only checks the direct
# dependencies (even if there are no properties to propagate it
# to).
_make_depend_on(sym, sym.direct_dep)
# In addition to the above, choice symbols depend on the choice
# they're in, but that's handled automatically since the Choice is
# propagated to the conditions of the properties before
# _build_dep() runs.
for choice in self._choices:
# Choices depend on the following:
# The prompt conditions
for node in choice.nodes:
if node.prompt:
_make_depend_on(choice, node.prompt[1])
# The default symbol conditions
for _, cond in choice.defaults:
_make_depend_on(choice, cond)
# The choice symbols themselves, because the y mode selection might
# change if a choice symbol's visibility changes
for sym in choice.syms:
# the default selection depends on the symbols
sym._dependents.add(choice)
def _invalidate_all(self):
# Undefined symbols never change value and don't need to be
# invalidated, so we can just iterate over defined symbols.
# Invalidating constant symbols would break things horribly.
for sym in self.defined_syms:
sym._invalidate()
for choice in self._choices:
choice._invalidate()
#
# Misc.
#
def _expand_syms(self, s):
"""
Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols.
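Illustrative example (with a hypothetical ARCH symbol): if ARCH has the
string value "x86", _expand_syms("arch/$ARCH/Kconfig") returns
"arch/x86/Kconfig".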
"""
while 1:
sym_ref_match = _sym_ref_re_search(s)
if not sym_ref_match:
return s
sym = self.syms.get(sym_ref_match.group(1))
s = s[:sym_ref_match.start()] + \
(sym.str_value if sym else "") + \
s[sym_ref_match.end():]
def _parse_error(self, msg):
if self._filename is None:
loc = ""
else:
loc = "{}:{}: ".format(self._filename, self._linenr)
raise KconfigSyntaxError(
"{}Couldn't parse '{}': {}".format(loc, self._line.rstrip(), msg))
def _warn(self, msg, filename=None, linenr=None):
"""
For printing general warnings.
"""
if self._print_warnings:
_stderr_msg("warning: " + msg, filename, linenr)
def _warn_undef_assign(self, msg, filename=None, linenr=None):
"""
See the class documentation.
"""
if self._print_undef_assign:
_stderr_msg("warning: " + msg, filename, linenr)
def _warn_undef_assign_load(self, name, val, filename, linenr):
"""
Special version for load_config().
"""
self._warn_undef_assign(
'attempt to assign the value "{}" to the undefined symbol {}' \
.format(val, name), filename, linenr)
class Symbol(object):
"""
Represents a configuration symbol:
(menu)config FOO
...
The following attributes are available. They should be viewed as read-only,
and some are implemented through @property magic (but are still efficient
to access due to internal caching).
Note: Prompts, help texts, and locations are stored in the Symbol's
MenuNode(s) rather than in the Symbol itself. Check the MenuNode class and
the Symbol.nodes attribute. This organization matches the C tools.
name:
The name of the symbol, e.g. "FOO" for 'config FOO'.
type:
The type of the symbol. One of BOOL, TRISTATE, STRING, INT, HEX, UNKNOWN.
UNKNOWN is for undefined symbols, (non-special) constant symbols, and
symbols defined without a type.
When running without modules (MODULES having the value n), TRISTATE
symbols magically change type to BOOL. This also happens for symbols
within choices in "y" mode. This matches the C tools, and makes sense for
menuconfig-like functionality.
orig_type:
The type as given in the Kconfig file, without any magic applied. Used
when printing the symbol.
str_value:
The value of the symbol as a string. Gives the value for string/int/hex
symbols. For bool/tristate symbols, gives "n", "m", or "y".
This is the symbol value that's used in relational expressions
(A = B, A != B, etc.)
Gotcha: For int/hex symbols, the exact format of the value must often be
preserved (e.g., when writing a .config file), hence why you can't get it
directly as an int. Do int(int_sym.str_value) or
int(hex_sym.str_value, 16) to get the integer value.
tri_value:
The tristate value of the symbol as an integer. One of 0, 1, 2,
representing n, m, y. Always 0 (n) for non-bool/tristate symbols.
This is the symbol value that's used outside of relation expressions
(A, !A, A && B, A || B).
assignable:
A tuple containing the tristate user values that can currently be
assigned to the symbol (that would be respected), ordered from lowest (0,
representing n) to highest (2, representing y). This corresponds to the
selections available in the menuconfig interface. The set of assignable
values is calculated from the symbol's visibility and selects/implies.
Returns the empty tuple for non-bool/tristate symbols and for symbols with
visibility n. The other possible values are (0, 2), (0, 1, 2), (1, 2),
(1,), and (2,). A (1,) or (2,) result means the symbol is visible but
"locked" to m or y through a select, perhaps in combination with the
visibility. menuconfig represents this as -M- and -*-, respectively.
For string/hex/int symbols, check if Symbol.visibility is non-0 (non-n)
instead to determine if the value can be changed.
Some handy 'assignable' idioms:
# Is 'sym' an assignable (visible) bool/tristate symbol?
if sym.assignable:
# What's the highest value it can be assigned? [-1] in Python
# gives the last element.
sym_high = sym.assignable[-1]
# The lowest?
sym_low = sym.assignable[0]
# Can the symbol be set to at least m?
if sym.assignable[-1] >= 1:
...
# Can the symbol be set to m?
if 1 in sym.assignable:
...
visibility:
The visibility of the symbol. One of 0, 1, 2, representing n, m, y. See
the module documentation for an overview of symbol values and visibility.
user_value:
The user value of the symbol. None if no user value has been assigned
(via Kconfig.load_config() or Symbol.set_value()).
Holds 0, 1, or 2 for bool/tristate symbols, and a string for the other
symbol types.
WARNING: Do not assign directly to this. It will break things. Use
Symbol.set_value().
config_string:
The .config assignment string that would get written out for the symbol
by Kconfig.write_config(). None if no .config assignment would get
written out. In general, visible symbols, symbols with (active) defaults,
and selected symbols get written out.
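Illustrative values (assuming the default "CONFIG_" prefix):
'CONFIG_FOO=y', '# CONFIG_FOO is not set', and 'CONFIG_BAR="baz"', each
with a terminating newline.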
nodes:
A list of MenuNodes for this symbol. Will contain a single MenuNode for
most symbols. Undefined and constant symbols have an empty nodes list.
Symbols defined in multiple locations get one node for each location.
choice:
Holds the parent Choice for choice symbols, and None for non-choice
symbols. Doubles as a flag for whether a symbol is a choice symbol.
defaults:
List of (default, cond) tuples for the symbol's 'default' properties. For
example, 'default A && B if C || D' is represented as
((AND, A, B), (OR, C, D)). If no condition was given, 'cond' is
self.kconfig.y.
Note that 'depends on' and parent dependencies are propagated to
'default' conditions.
selects:
List of (symbol, cond) tuples for the symbol's 'select' properties. For
example, 'select A if B && C' is represented as (A, (AND, B, C)). If no
condition was given, 'cond' is self.kconfig.y.
Note that 'depends on' and parent dependencies are propagated to 'select'
conditions.
implies:
Like 'selects', for imply.
ranges:
List of (low, high, cond) tuples for the symbol's 'range' properties. For
example, 'range 1 2 if A' is represented as (1, 2, A). If there is no
condition, 'cond' is self.kconfig.y.
Note that 'depends on' and parent dependencies are propagated to 'range'
conditions.
Gotcha: 1 and 2 above will be represented as (undefined) Symbols rather
than plain integers. Undefined symbols get their name as their string
value, so this works out. The C tools work the same way.
rev_dep:
Reverse dependency expression from other symbols selecting this symbol.
Multiple selections get ORed together. A condition on a select is ANDed
with the selecting symbol.
For example, if A has 'select FOO' and B has 'select FOO if C', then
FOO's rev_dep will be (OR, A, (AND, B, C)).
weak_rev_dep:
Like rev_dep, for imply.
direct_dep:
The 'depends on' dependencies. If a symbol is defined in multiple
locations, the dependencies at each location are ORed together.
Internally, this is only used to implement 'imply', which only applies if
the implied symbol has expr_value(self.direct_dep) != 0. 'depends on' and
parent dependencies are automatically propagated to the conditions of
properties, so normally it's redundant to check the direct dependencies.
env_var:
If the Symbol has an 'option env="FOO"' option, this contains the name
("FOO") of the environment variable. None for symbols that aren't set
from the environment.
'option env="FOO"' acts as a 'default' property whose value is the value
of $FOO.
env_var is set to "<uname release>" for the predefined symbol
UNAME_RELEASE, which holds the 'release' field from uname.
Symbols with an 'option env' option are never written out to .config
files, even if they are visible. env_var corresponds to a flag called
SYMBOL_AUTO in the C implementation.
is_allnoconfig_y:
True if the symbol has 'option allnoconfig_y' set on it. This has no
effect internally (except when printing symbols), but can be checked by
scripts.
is_constant:
True if the symbol is a constant (quoted) symbol.
kconfig:
The Kconfig instance this symbol is from.
"""
__slots__ = (
"_cached_assignable",
"_cached_str_val",
"_cached_tri_val",
"_cached_vis",
"_dependents",
"_was_set",
"_write_to_conf",
"choice",
"defaults",
"direct_dep",
"env_var",
"implies",
"is_allnoconfig_y",
"is_constant",
"kconfig",
"name",
"nodes",
"orig_type",
"ranges",
"rev_dep",
"selects",
"user_value",
"weak_rev_dep",
)
#
# Public interface
#
@property
def type(self):
"""
See the class documentation.
"""
if self.orig_type == TRISTATE and \
((self.choice and self.choice.tri_value == 2) or
not self.kconfig.modules.tri_value):
return BOOL
return self.orig_type
@property
def str_value(self):
"""
See the class documentation.
"""
if self._cached_str_val is not None:
return self._cached_str_val
if self.orig_type in (BOOL, TRISTATE):
# Also calculates the visibility, so invalidation safe
self._cached_str_val = TRI_TO_STR[self.tri_value]
return self._cached_str_val
# As a quirk of Kconfig, undefined symbols get their name as their
# string value. This is why things like "FOO = bar" work for seeing if
# FOO has the value "bar".
if self.orig_type == UNKNOWN:
self._cached_str_val = self.name
return self.name
val = ""
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
vis = self.visibility
self._write_to_conf = (vis != 0)
if self.orig_type in (INT, HEX):
# The C implementation checks the user value against the range in a
# separate code path (post-processing after loading a .config).
# Checking all values here instead makes more sense for us. It
# requires that we check for a range first.
base = _TYPE_TO_BASE[self.orig_type]
# Check if a range is in effect
low, high = self.active_range
if vis and self.user_value is not None and \
_is_base_n(self.user_value, base) and \
(low is None or
low <= int(self.user_value, base) <= high):
# If the user value is well-formed and satisfies range
# constraints, it is stored in exactly the same form as
# specified in the assignment (with or without "0x", etc.)
val = self.user_value
else:
# No user value or invalid user value. Look at defaults.
for val_expr, cond in self.defaults:
if expr_value(cond):
self._write_to_conf = True
val = val_expr.str_value
if _is_base_n(val, base):
val_num = int(val, base)
else:
val_num = 0 # strtoll() on empty string
break
else:
val_num = 0 # strtoll() on empty string
# This clamping procedure runs even if there's no default
if low is not None:
clamp = None
if val_num < low:
clamp = low
elif val_num > high:
clamp = high
if clamp is not None:
# The value is rewritten to a standard form if it is
# clamped
val = str(clamp) \
if self.orig_type == INT else \
hex(clamp)
elif self.orig_type == STRING:
if vis and self.user_value is not None:
# If the symbol is visible and has a user value, use that
val = self.user_value
else:
# Otherwise, look at defaults
for val_expr, cond in self.defaults:
if expr_value(cond):
self._write_to_conf = True
val = val_expr.str_value
break
# Corresponds to SYMBOL_AUTO in the C implementation
if self.env_var is not None:
self._write_to_conf = False
self._cached_str_val = val
return val
@property
def tri_value(self):
"""
See the class documentation.
"""
if self._cached_tri_val is not None:
return self._cached_tri_val
if self.orig_type not in (BOOL, TRISTATE):
self._cached_tri_val = 0
return self._cached_tri_val
val = 0
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
vis = self.visibility
self._write_to_conf = (vis != 0)
if not self.choice:
# Non-choice symbol
if vis and self.user_value is not None:
# If the symbol is visible and has a user value, use that
val = min(self.user_value, vis)
else:
# Otherwise, look at defaults and weak reverse dependencies
# (implies)
for default, cond in self.defaults:
cond_val = expr_value(cond)
if cond_val:
val = min(expr_value(default), cond_val)
self._write_to_conf = True
break
# Weak reverse dependencies are only considered if our
# direct dependencies are met
weak_rev_dep_val = expr_value(self.weak_rev_dep)
if weak_rev_dep_val and expr_value(self.direct_dep):
val = max(weak_rev_dep_val, val)
self._write_to_conf = True
# Reverse (select-related) dependencies take precedence
rev_dep_val = expr_value(self.rev_dep)
if rev_dep_val:
val = max(rev_dep_val, val)
self._write_to_conf = True
# m is promoted to y for (1) bool symbols and (2) symbols with a
# weak_rev_dep (from imply) of y
if val == 1 and \
(self.type == BOOL or expr_value(self.weak_rev_dep) == 2):
val = 2
elif vis == 2:
# Visible choice symbol in y-mode choice. The choice mode limits
# the visibility of choice symbols, so it's sufficient to just
# check the visibility of the choice symbols themselves.
val = 2 if self.choice.selection is self else 0
elif vis and self.user_value:
# Visible choice symbol in m-mode choice, with a non-0 user value set
val = 1
self._cached_tri_val = val
return val
@property
def assignable(self):
"""
See the class documentation.
"""
if self._cached_assignable is not None:
return self._cached_assignable
self._cached_assignable = self._get_assignable()
return self._cached_assignable
@property
def visibility(self):
"""
See the class documentation.
"""
if self._cached_vis is not None:
return self._cached_vis
self._cached_vis = _get_visibility(self)
return self._cached_vis
@property
def config_string(self):
"""
See the class documentation.
"""
# Note: _write_to_conf is determined when the value is calculated. This
# is a hidden function call due to property magic.
val = self.str_value
if not self._write_to_conf:
return None
if self.orig_type in (BOOL, TRISTATE):
return "{}{}={}\n" \
.format(self.kconfig.config_prefix, self.name, val) \
if val != "n" else \
"# {}{} is not set\n" \
.format(self.kconfig.config_prefix, self.name)
if self.orig_type in (INT, HEX):
return "{}{}={}\n" \
.format(self.kconfig.config_prefix, self.name, val)
if self.orig_type == STRING:
# Escape \ and "
return '{}{}="{}"\n' \
.format(self.kconfig.config_prefix, self.name, escape(val))
_internal_error("Internal error while creating .config: unknown "
'type "{}".'.format(self.orig_type))
def set_value(self, value):
"""
Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. For bool and tristate symbols, use the 'assignable' attribute to
check which values can currently be assigned. Setting values outside
'assignable' will cause Symbol.user_str/tri_value to differ from
Symbol.str/tri_value (be truncated down or up).
Setting a choice symbol to 2 (y) only updates Choice.user_selection on
the parent choice and not Symbol.user_value itself. This gives the
expected behavior when a choice is switched between different modes.
Choice.user_selection is considered when the choice is in y mode (the
"normal" mode).
Other symbols that depend (possibly indirectly) on this symbol are
automatically recalculated to reflect the assigned value.
value:
The user value to give to the symbol. For bool and tristate symbols,
pass 0, 1, 2 for n, m, and y, respectively. For other symbol types,
pass a string.
Values that are invalid for the type (such as "foo" or 1 (m) for a
BOOL) are ignored and won't be stored in Symbol.user_str/tri_value.
Kconfiglib will print a warning by default for invalid assignments,
and set_value() will return False.
Returns True if the value is valid for the type of the symbol, and
False otherwise. This only looks at the form of the value. For BOOL and
TRISTATE symbols, check the Symbol.assignable attribute to see what
values are currently in range and would actually be reflected in the
value of the symbol. For other symbol types, check whether the
visibility is non-n.
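A minimal sketch (assumes 'kconf' is a Kconfig instance that defines a
tristate symbol FOO):

  sym = kconf.syms["FOO"]
  if 2 in sym.assignable:
      sym.set_value(2)  # Corresponds to FOO=y in a .config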
"""
if value == self.user_value:
# We know the value must be valid if it was successfully set
# previously
self._was_set = True
return True
# Check if the value is valid for our type
if not ((self.orig_type == BOOL and value in (0, 2) ) or
(self.orig_type == TRISTATE and value in (0, 1, 2) ) or
(self.orig_type == STRING and isinstance(value, str)) or
(self.orig_type == INT and isinstance(value, str)
and _is_base_n(value, 10) ) or
(self.orig_type == HEX and isinstance(value, str)
and _is_base_n(value, 16)
and int(value, 16) >= 0)):
# Display tristate values as n, m, y in the warning
warning = "the value {} is invalid for {}, which has type {}" \
.format(TRI_TO_STR[value] if value in (0, 1, 2) else
"'{}'".format(value),
self.name, TYPE_TO_STR[self.orig_type])
if self.orig_type in (BOOL, TRISTATE) and value in ("n", "m", "y"):
warning += ' (pass 0, 1, 2 for n, m, y, respectively)'
self.kconfig._warn(warning)
return False
if self.env_var is not None:
self.kconfig._warn("ignored attempt to assign user value to "
"{}, which gets its value from the environment"
.format(self.name))
return False
if self.choice and value == 2:
# Remember this as a choice selection only. Makes switching back
# and forth between choice modes work as expected, and makes the
# check above (for whether the user value is the same as before)
# safe.
self.choice.user_selection = self
self.choice._was_set = True
if self._is_user_assignable():
self.choice._rec_invalidate()
else:
self.user_value = value
self._was_set = True
if self._is_user_assignable():
self._rec_invalidate()
return True
def unset_value(self):
"""
Resets the user value of the symbol, as if the symbol had never gotten
a user value via Kconfig.load_config() or Symbol.set_value().
"""
if self.user_value is not None:
self.user_value = None
if self._is_user_assignable():
self._rec_invalidate()
@property
def active_range(self):
"""
Returns a tuple of (low, high) integer values if a range
limit is active for this symbol, or (None, None) if no range
limit exists.
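Illustrative example: for an int symbol with an active 'range 1 10'
property, active_range is (1, 10).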
"""
base = _TYPE_TO_BASE[self.orig_type]
for low_expr, high_expr, cond in self.ranges:
if expr_value(cond):
# The zeros are from the C implementation running strtoll()
# on empty strings
low = int(low_expr.str_value, base) if \
_is_base_n(low_expr.str_value, base) else 0
high = int(high_expr.str_value, base) if \
_is_base_n(high_expr.str_value, base) else 0
return (low, high)
return (None, None)
def __repr__(self):
"""
Returns a string with information about the symbol (including its name,
value, visibility, and location(s)) when it is evaluated on e.g. the
interactive Python prompt.
"""
fields = []
fields.append("symbol " + self.name)
fields.append(TYPE_TO_STR[self.type])
for node in self.nodes:
if node.prompt:
fields.append('"{}"'.format(node.prompt[0]))
# Only add quotes for non-bool/tristate symbols
fields.append("value " +
(self.str_value
if self.orig_type in (BOOL, TRISTATE) else
'"{}"'.format(self.str_value)))
if not self.is_constant:
# These aren't helpful to show for constant symbols
if self.user_value is not None:
# Only add quotes for non-bool/tristate symbols
fields.append("user value " +
(TRI_TO_STR[self.user_value]
if self.orig_type in (BOOL, TRISTATE) else
'"{}"'.format(self.user_value)))
fields.append("visibility " + TRI_TO_STR[self.visibility])
if self.choice:
fields.append("choice symbol")
if self.is_allnoconfig_y:
fields.append("allnoconfig_y")
if self is self.kconfig.defconfig_list:
fields.append("is the defconfig_list symbol")
if self.env_var is not None:
fields.append("from environment variable " + self.env_var)
if self is self.kconfig.modules:
fields.append("is the modules symbol")
fields.append("direct deps " +
TRI_TO_STR[expr_value(self.direct_dep)])
if self.nodes:
for node in self.nodes:
fields.append("{}:{}".format(node.filename, node.linenr))
else:
if self.is_constant:
fields.append("constant")
else:
fields.append("undefined")
return "<{}>".format(", ".join(fields))
def __str__(self):
"""
Returns a string representation of the symbol when it is printed,
matching the Kconfig format. Prompts and help texts are included,
though they really belong to the symbol's menu nodes rather than the
symbol itself.
The output is designed so that feeding it back to a Kconfig parser
redefines the symbol as is. This also works for symbols defined in
multiple locations, where all the definitions are output. See the
module documentation for a small gotcha related to choice symbols.
An empty string is returned for undefined and constant symbols.
"""
return _sym_choice_str(self)
#
# Private methods
#
def __init__(self):
"""
Symbol constructor -- not intended to be called directly by Kconfiglib
clients.
"""
# These attributes are always set on the instance from outside and
# don't need defaults:
# kconfig
# direct_dep
# is_constant
# name
# rev_dep
# weak_rev_dep
self.orig_type = UNKNOWN
self.defaults = []
self.selects = []
self.implies = []
self.ranges = []
self.nodes = []
self.user_value = \
self.choice = \
self.env_var = \
self._cached_str_val = self._cached_tri_val = self._cached_vis = \
self._cached_assignable = None
# _write_to_conf is calculated along with the value. If True, the
# Symbol gets a .config entry.
self.is_allnoconfig_y = \
self._was_set = \
self._write_to_conf = False
# See Kconfig._build_dep()
self._dependents = set()
def _get_assignable(self):
"""
Worker function for the 'assignable' attribute.
"""
if self.orig_type not in (BOOL, TRISTATE):
return ()
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
vis = self.visibility
if not vis:
return ()
rev_dep_val = expr_value(self.rev_dep)
if vis == 2:
if self.choice:
return (2,)
if not rev_dep_val:
if self.type == BOOL or expr_value(self.weak_rev_dep) == 2:
return (0, 2)
return (0, 1, 2)
if rev_dep_val == 2:
return (2,)
# rev_dep_val == 1
if self.type == BOOL or expr_value(self.weak_rev_dep) == 2:
return (2,)
return (1, 2)
# vis == 1
# Must be a tristate here, because bool m visibility gets promoted to y
if not rev_dep_val:
return (0, 1) if expr_value(self.weak_rev_dep) != 2 else (0, 2)
if rev_dep_val == 2:
return (2,)
# vis == rev_dep_val == 1
return (1,)
def _is_user_assignable(self):
"""
Returns True if the symbol has a prompt, meaning a user value might
have an effect on it. Used as an optimization to skip invalidation when
promptless symbols are assigned to (given a user value).
Prints a warning if the symbol has no prompt. In some contexts (e.g.
when loading a .config file) assignments to promptless symbols are
normal and expected, so the warning can be disabled.
"""
for node in self.nodes:
if node.prompt:
return True
if self.kconfig._warn_no_prompt:
self.kconfig._warn(self.name + " has no prompt, meaning user "
"values have no effect on it")
return False
def _invalidate(self):
"""
Marks the symbol as needing to be recalculated.
"""
self._cached_str_val = self._cached_tri_val = self._cached_vis = \
self._cached_assignable = None
def _rec_invalidate(self):
"""
Invalidates the symbol and all items that (possibly) depend on it.
"""
if self is self.kconfig.modules:
# Invalidating MODULES has wide-ranging effects
self.kconfig._invalidate_all()
else:
self._invalidate()
for item in self._dependents:
# _cached_vis doubles as a flag that tells us whether 'item'
# has cached values, because it's calculated as a side effect
# of calculating all other (non-constant) cached values.
#
# If item._cached_vis is None, it means there can't be cached
# values on other items that depend on 'item', because if there
# were, some value on 'item' would have been calculated and
# item._cached_vis set as a side effect. It's therefore safe to
# stop the invalidation at symbols with _cached_vis None.
#
# This approach massively speeds up scripts that set a lot of
# values, vs simply invalidating all possibly dependent symbols
# (even when you already have a list of all the dependent
# symbols, because some symbols get huge dependency trees).
#
# This gracefully handles dependency loops too, which is nice
# for choices, where the choice depends on the choice symbols
# and vice versa.
if item._cached_vis is not None:
item._rec_invalidate()
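# Illustrative sketch (not part of the library) of how the invalidation
# machinery above surfaces to API users. 'kconf' and the symbol name FOO are
# assumptions; any parsed configuration with a bool/tristate symbol works:
#
#   from kconfiglib import Kconfig, TRI_TO_STR
#   kconf = Kconfig("Kconfig")        # parse the top-level Kconfig file
#   sym = kconf.syms["FOO"]
#   sym.set_value(2)                  # triggers _rec_invalidate() internally
#   print(TRI_TO_STR[sym.tri_value])  # cached values are recalculated lazily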
class Choice(object):
"""
Represents a choice statement:
choice
...
endchoice
The following attributes are available on Choice instances. They should be
treated as read-only, and some are implemented through @property magic (but
are still efficient to access due to internal caching).
Note: Prompts, help texts, and locations are stored in the Choice's
MenuNode(s) rather than in the Choice itself. Check the MenuNode class and
the Choice.nodes attribute. This organization matches the C tools.
name:
The name of the choice, e.g. "FOO" for 'choice FOO', or None if the
Choice has no name. I can't remember ever seeing named choices in
practice, but the C tools support them too.
type:
The type of the choice. One of BOOL, TRISTATE, UNKNOWN. UNKNOWN is for
choices defined without a type where none of the contained symbols have a
type either (otherwise the choice inherits the type of the first symbol
defined with a type).
When running without modules (CONFIG_MODULES=n), TRISTATE choices
magically change type to BOOL. This matches the C tools, and makes sense
for menuconfig-like functionality.
orig_type:
The type as given in the Kconfig file, without any magic applied. Used
when printing the choice.
tri_value:
The tristate value (mode) of the choice. A choice can be in one of three
modes:
0 (n) - The choice is disabled and no symbols can be selected. For
visible choices, this mode is only possible for choices with
the 'optional' flag set (see kconfig-language.txt).
1 (m) - Any number of choice symbols can be set to m, the rest will
be n.
2 (y) - One symbol will be y, the rest n.
Only tristate choices can be in m mode. The visibility of the choice is
an upper bound on the mode, and the mode in turn is an upper bound on the
visibility of the choice symbols.
To change the mode, use Choice.set_value().
Implementation note:
The C tools internally represent choices as a type of symbol, with
special-casing in many code paths. This is why there is a lot of
similarity to Symbol. The value (mode) of a choice is really just a
normal symbol value, and an implicit reverse dependency forces its
lower bound to m for visible non-optional choices (the reverse
dependency is 'm && <visibility>').
Symbols within choices get the choice propagated as a dependency to
their properties. This turns the mode of the choice into an upper bound
on e.g. the visibility of choice symbols, and explains the gotcha
related to printing choice symbols mentioned in the module docstring.
Kconfiglib uses a separate Choice class only because it makes the code
and interface less confusing (especially in a user-facing interface).
Corresponding attributes have the same name in the Symbol and Choice
classes, for consistency and compatibility.
assignable:
See the symbol class documentation. Gives the assignable values (modes).
visibility:
See the Symbol class documentation. Acts on the value (mode).
selection:
The Symbol instance of the currently selected symbol. None if the Choice
is not in y mode or has no selected symbol (due to unsatisfied
dependencies on choice symbols).
WARNING: Do not assign directly to this. It will break things. Call
sym.set_value(2) on the choice symbol you want to select instead.
user_value:
The value (mode) selected by the user through Choice.set_value(). Either
0, 1, or 2, or None if the user hasn't selected a mode. See
Symbol.user_value.
WARNING: Do not assign directly to this. It will break things. Use
Choice.set_value() instead.
user_selection:
The symbol selected by the user (by setting it to y). Ignored if the
choice is not in y mode, but still remembered so that the choice "snaps
back" to the user selection if the mode is changed back to y. This might
differ from 'selection' due to unsatisfied dependencies.
WARNING: Do not assign directly to this. It will break things. Call
sym.set_value(2) on the choice symbol to be selected instead.
syms:
List of symbols contained in the choice.
Gotcha: If a symbol depends on the previous symbol within a choice so
that an implicit menu is created, it won't be a choice symbol, and won't
be included in 'syms'. There are real-world examples of this, and it was
a PITA to support in older versions of Kconfiglib that didn't implement
the menu structure.
nodes:
A list of MenuNodes for this choice. In practice, the list will probably
always contain a single MenuNode, but it is possible to give a choice a
name and define it in multiple locations (I've never even seen a named
choice though).
defaults:
List of (symbol, cond) tuples for the choice's 'defaults' properties. For
example, 'default A if B && C' is represented as (A, (AND, B, C)). If
there is no condition, 'cond' is self.kconfig.y.
Note that 'depends on' and parent dependencies are propagated to
'default' conditions.
is_optional:
True if the choice has the 'optional' flag set on it and can be in
n mode.
kconfig:
The Kconfig instance this choice is from.
"""
__slots__ = (
"_cached_assignable",
"_cached_selection",
"_cached_vis",
"_dependents",
"_was_set",
"defaults",
"is_constant",
"is_optional",
"kconfig",
"name",
"nodes",
"orig_type",
"syms",
"user_selection",
"user_value",
)
#
# Public interface
#
@property
def type(self):
"""
Returns the type of the choice. See Symbol.type.
"""
if self.orig_type == TRISTATE and not self.kconfig.modules.tri_value:
return BOOL
return self.orig_type
@property
def str_value(self):
"""
See the class documentation.
"""
return TRI_TO_STR[self.tri_value]
@property
def tri_value(self):
"""
See the class documentation.
"""
# This emulates a reverse dependency of 'm && visibility' for
# non-optional choices, which is how the C implementation does it
val = 0 if self.is_optional else 1
if self.user_value is not None:
val = max(val, self.user_value)
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
val = min(val, self.visibility)
# Promote m to y for boolean choices
return 2 if val == 1 and self.type == BOOL else val
@property
def assignable(self):
"""
See the class documentation.
"""
if self._cached_assignable is not None:
return self._cached_assignable
self._cached_assignable = self._get_assignable()
return self._cached_assignable
@property
def visibility(self):
"""
See the class documentation.
"""
if self._cached_vis is not None:
return self._cached_vis
self._cached_vis = _get_visibility(self)
return self._cached_vis
@property
def selection(self):
"""
See the class documentation.
"""
if self._cached_selection is not _NO_CACHED_SELECTION:
return self._cached_selection
self._cached_selection = self._get_selection()
return self._cached_selection
def set_value(self, value):
"""
Sets the user value (mode) of the choice. Like for Symbol.set_value(),
the visibility might truncate the value. Choices without the 'optional'
attribute (is_optional) can never be in n mode, but 0 is still accepted
since it's not a malformed value (though it will have no effect).
Returns True if the value is valid for the type of the choice, and
False otherwise. This only looks at the form of the value. Check the
Choice.assignable attribute to see what values are currently in range
and would actually be reflected in the mode of the choice.
"""
if value == self.user_value:
# We know the value must be valid if it was successfully set
# previously
self._was_set = True
return True
if not ((self.orig_type == BOOL and value in (0, 2)) or
(self.orig_type == TRISTATE and value in (0, 1, 2))):
self.kconfig._warn("the value '{}' is invalid for the choice, "
"which has type {}. Assignment ignored"
.format(value, TYPE_TO_STR[self.orig_type]))
return False
self.user_value = value
self._was_set = True
self._rec_invalidate()
return True
def unset_value(self):
"""
Resets the user value (mode) and user selection of the Choice, as if
the user had never touched the mode or any of the choice symbols.
"""
if self.user_value is not None or self.user_selection:
self.user_value = self.user_selection = None
self._rec_invalidate()
def __repr__(self):
"""
Returns a string with information about the choice when it is evaluated
on e.g. the interactive Python prompt.
"""
fields = []
fields.append("choice" if self.name is None else \
"choice " + self.name)
fields.append(TYPE_TO_STR[self.type])
for node in self.nodes:
if node.prompt:
fields.append('"{}"'.format(node.prompt[0]))
fields.append("mode " + self.str_value)
if self.user_value is not None:
fields.append('user mode {}'.format(TRI_TO_STR[self.user_value]))
if self.selection:
fields.append("{} selected".format(self.selection.name))
if self.user_selection:
user_sel_str = "{} selected by user" \
.format(self.user_selection.name)
if self.selection is not self.user_selection:
user_sel_str += " (overridden)"
fields.append(user_sel_str)
fields.append("visibility " + TRI_TO_STR[self.visibility])
if self.is_optional:
fields.append("optional")
for node in self.nodes:
fields.append("{}:{}".format(node.filename, node.linenr))
return "<{}>".format(", ".join(fields))
def __str__(self):
"""
Returns a string representation of the choice when it is printed,
matching the Kconfig format (though without the contained choice
symbols). Prompts and help texts are included, though they really
belong to the choice's menu nodes rather than the choice itself.
See Symbol.__str__() as well.
"""
return _sym_choice_str(self)
#
# Private methods
#
def __init__(self):
"""
Choice constructor -- not intended to be called directly by Kconfiglib
clients.
"""
# These attributes are always set on the instance from outside and
# don't need defaults:
# kconfig
self.orig_type = UNKNOWN
self.syms = []
self.defaults = []
self.nodes = []
self.name = \
self.user_value = self.user_selection = \
self._cached_vis = self._cached_assignable = None
self._cached_selection = _NO_CACHED_SELECTION
# is_constant is checked by _make_depend_on(). Just set it to avoid
# having to special-case choices.
self.is_constant = self.is_optional = False
# See Kconfig._build_dep()
self._dependents = set()
def _get_assignable(self):
"""
Worker function for the 'assignable' attribute.
"""
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
vis = self.visibility
if not vis:
return ()
if vis == 2:
if not self.is_optional:
return (2,) if self.type == BOOL else (1, 2)
return (0, 2) if self.type == BOOL else (0, 1, 2)
# vis == 1
return (0, 1) if self.is_optional else (1,)
def _get_selection(self):
"""
Worker function for the 'selection' attribute.
"""
# Warning: See Symbol._rec_invalidate(), and note that this is a hidden
# function call (property magic)
if self.tri_value != 2:
return None
# Use the user selection if it's visible
if self.user_selection and self.user_selection.visibility == 2:
return self.user_selection
# Otherwise, check if we have a default
for sym, cond in self.defaults:
# The default symbol must be visible too
if expr_value(cond) and sym.visibility:
return sym
# Otherwise, pick the first visible symbol, if any
for sym in self.syms:
if sym.visibility:
return sym
# Couldn't find a selection
return None
def _invalidate(self):
self._cached_vis = self._cached_assignable = None
self._cached_selection = _NO_CACHED_SELECTION
def _rec_invalidate(self):
"""
See Symbol._rec_invalidate()
"""
self._invalidate()
for item in self._dependents:
if item._cached_vis is not None:
item._rec_invalidate()
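# Illustrative sketch (assumes 'kconf' is a parsed Kconfig and SYM is a
# symbol defined inside a bool choice). Choices are driven through their
# symbols rather than assigned to directly:
#
#   sym = kconf.syms["SYM"]
#   choice = sym.choice      # the containing Choice, or None
#   sym.set_value(2)         # selects SYM and puts the choice in y mode
#   print(choice.selection)  # SYM's Symbol instance, if visibility permits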
class MenuNode(object):
"""
Represents a menu node in the configuration. This corresponds to an entry
in e.g. the 'make menuconfig' interface, though non-visible choices, menus,
and comments also get menu nodes. If a symbol or choice is defined in
multiple locations, it gets one menu node for each location.
The top-level menu node, corresponding to the implicit top-level menu, is
available in Kconfig.top_node.
The menu nodes for a Symbol or Choice can be found in the
Symbol/Choice.nodes attribute. Menus and comments are represented as plain
menu nodes, with their text stored in the prompt attribute (prompt[0]).
This mirrors the C implementation.
The following attributes are available on MenuNode instances. They should
be viewed as read-only.
item:
Either a Symbol, a Choice, or one of the constants MENU and COMMENT.
Menus and comments are represented as plain menu nodes. Ifs are collapsed
(matching the C implementation) and do not appear in the final menu tree.
next:
The following menu node. None if there is no following node.
list:
The first child menu node. None if there are no children.
Choices and menus naturally have children, but Symbols can also have
children because of menus created automatically from dependencies (see
kconfig-language.txt).
parent:
The parent menu node. None if there is no parent.
prompt:
A (string, cond) tuple with the prompt for the menu node and its
conditional expression (which is self.kconfig.y if there is no
condition). None if there is no prompt.
For symbols and choices, the prompt is stored in the MenuNode rather than
the Symbol or Choice instance. For menus and comments, the prompt holds
the text.
help:
The help text for the menu node for Symbols and Choices. None if there is
no help text. Always stored in the node rather than the Symbol or Choice.
It is possible to have a separate help text at each location if a symbol
is defined in multiple locations.
dep:
The 'depends on' dependencies for the menu node, or self.kconfig.y if
there are no dependencies. Parent dependencies are propagated to this
attribute, and this attribute is then in turn propagated to the
properties of symbols and choices.
If a symbol is defined in multiple locations, only the properties defined
at a particular location get the corresponding MenuNode.dep dependencies
propagated to them.
visibility:
The 'visible if' dependencies for the menu node (which must represent a
menu), or self.kconfig.y if there are no 'visible if' dependencies.
'visible if' dependencies are recursively propagated to the prompts of
symbols and choices within the menu.
is_menuconfig:
True if the symbol for the menu node (it must be a symbol) was defined
with 'menuconfig' rather than 'config' (at this location). This is a hint
on how to display the menu entry (display the children in a separate menu
rather than indenting them). It's ignored internally by Kconfiglib,
except when printing symbols.
filename/linenr:
The location where the menu node appears.
kconfig:
The Kconfig instance the menu node is from.
"""
__slots__ = (
"dep",
"filename",
"help",
"is_menuconfig",
"item",
"kconfig",
"linenr",
"list",
"next",
"parent",
"prompt",
"visibility",
)
def __repr__(self):
"""
Returns a string with information about the menu node when it is
evaluated on e.g. the interactive Python prompt.
"""
fields = []
if isinstance(self.item, Symbol):
fields.append("menu node for symbol " + self.item.name)
elif isinstance(self.item, Choice):
s = "menu node for choice"
if self.item.name is not None:
s += " " + self.item.name
fields.append(s)
elif self.item == MENU:
fields.append("menu node for menu")
elif self.item == COMMENT:
fields.append("menu node for comment")
elif self.item is None:
fields.append("menu node for if (should not appear in the final "
" tree)")
else:
raise InternalError("unable to determine type in "
"MenuNode.__repr__()")
if self.prompt:
fields.append('prompt "{}" (visibility {})'
.format(self.prompt[0],
TRI_TO_STR[expr_value(self.prompt[1])]))
if isinstance(self.item, Symbol) and self.is_menuconfig:
fields.append("is menuconfig")
fields.append("deps " + TRI_TO_STR[expr_value(self.dep)])
if self.item == MENU:
fields.append("'visible if' deps " + \
TRI_TO_STR[expr_value(self.visibility)])
if isinstance(self.item, (Symbol, Choice)) and self.help is not None:
fields.append("has help")
if self.list:
fields.append("has child")
if self.next:
fields.append("has next")
fields.append("{}:{}".format(self.filename, self.linenr))
return "<{}>".format(", ".join(fields))
def __str__(self):
"""
Returns a string representation of the MenuNode, matching the Kconfig
format.
For Symbol and Choice menu nodes, this function simply calls through to
MenuNode.item.__str__(). For MENU and COMMENT nodes, a Kconfig-like
representation of the menu or comment is returned.
"""
if isinstance(self.item, (Symbol, Choice)):
return self.item.__str__()
if self.item in (MENU, COMMENT):
s = ("menu" if self.item == MENU else "comment") + \
' "{}"\n'.format(escape(self.prompt[0]))
if self.dep is not self.kconfig.y:
s += "\tdepends on {}\n".format(expr_str(self.dep))
if self.item == MENU and self.visibility is not self.kconfig.y:
s += "\tvisible if {}\n".format(expr_str(self.visibility))
return s
# 'if' node. Should never appear in the final tree.
return "if " + expr_str(self.dep)
class KconfigSyntaxError(Exception):
"""
Exception raised for syntax errors.
"""
pass
class InternalError(Exception):
"""
Exception raised for internal errors.
"""
pass
#
# Public functions
#
def expr_value(expr):
"""
Evaluates the expression 'expr' to a tristate value. Returns 0 (n), 1 (m),
or 2 (y).
'expr' must be an already-parsed expression from a Symbol, Choice, or
MenuNode property. To evaluate an expression represented as a string, use
Kconfig.eval_string().
Passing subexpressions of expressions to this function works as expected.
"""
if not isinstance(expr, tuple):
return expr.tri_value
if expr[0] == AND:
v1 = expr_value(expr[1])
# Short-circuit the n case as an optimization (~5% faster
# allnoconfig.py and allyesconfig.py, as of writing)
return 0 if not v1 else min(v1, expr_value(expr[2]))
if expr[0] == OR:
v1 = expr_value(expr[1])
# Short-circuit the y case as an optimization
return 2 if v1 == 2 else max(v1, expr_value(expr[2]))
if expr[0] == NOT:
return 2 - expr_value(expr[1])
if expr[0] in _RELATIONS:
# Implements <, <=, >, >= comparisons as well. These were added to
# kconfig in 31847b67 (kconfig: allow use of relations other than
# (in)equality).
# This mirrors the C tools pretty closely. Perhaps there's a more
# pythonic way to structure this.
oper, op1, op2 = expr
# If both operands are strings...
if op1.orig_type == STRING and op2.orig_type == STRING:
# ...then compare them lexicographically
comp = _strcmp(op1.str_value, op2.str_value)
else:
# Otherwise, try to compare them as numbers...
try:
comp = int(op1.str_value, _TYPE_TO_BASE[op1.orig_type]) - \
int(op2.str_value, _TYPE_TO_BASE[op2.orig_type])
except ValueError:
# Fall back on a lexicographic comparison if the operands don't
# parse as numbers
comp = _strcmp(op1.str_value, op2.str_value)
if oper == EQUAL: res = comp == 0
elif oper == UNEQUAL: res = comp != 0
elif oper == LESS: res = comp < 0
elif oper == LESS_EQUAL: res = comp <= 0
elif oper == GREATER: res = comp > 0
elif oper == GREATER_EQUAL: res = comp >= 0
return 2*res
_internal_error("Internal error while evaluating expression: "
"unknown operation {}.".format(expr[0]))
def expr_str(expr):
"""
Returns the string representation of the expression 'expr', as in a Kconfig
file.
Passing subexpressions of expressions to this function works as expected.
"""
if not isinstance(expr, tuple):
if isinstance(expr, Choice):
if expr.name is not None:
return "<choice {}>".format(expr.name)
return "<choice>"
# Symbol
if expr.is_constant:
return '"{}"'.format(escape(expr.name))
return expr.name
if expr[0] == NOT:
if isinstance(expr[1], Symbol):
return "!" + expr_str(expr[1])
return "!({})".format(expr_str(expr[1]))
if expr[0] == AND:
return "{} && {}".format(_format_and_op(expr[1]),
_format_and_op(expr[2]))
if expr[0] == OR:
return "{} || {}".format(expr_str(expr[1]), expr_str(expr[2]))
# Relation
return "{} {} {}".format(expr_str(expr[1]),
_REL_TO_STR[expr[0]],
expr_str(expr[2]))
# escape()/unescape() helpers
_escape_re_sub = re.compile(r'(["\\])').sub
_unescape_re_sub = re.compile(r"\\(.)").sub
def escape(s):
r"""
Escapes the string 's' in the same fashion as is done for display in
Kconfig format and when writing strings to a .config file. " and \ are
replaced by \" and \\, respectively.
"""
return _escape_re_sub(r"\\\1", s)
def unescape(s):
r"""
Unescapes the string 's'. \ followed by any character is replaced with just
that character. Used internally when reading .config files.
"""
return _unescape_re_sub(r"\1", s)
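# Round-trip sketch for the two helpers above (plain Python, no assumptions):
#
#   escape('say "hi"')        # -> say \"hi\" (quotes gain a backslash)
#   unescape(escape(s)) == s  # holds for any string s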
#
# Internal functions
#
def _get_visibility(sc):
"""
Symbols and Choices have a "visibility" that acts as an upper bound on the
values a user can set for them, corresponding to the visibility in e.g.
'make menuconfig'. This function calculates the visibility for the Symbol
or Choice 'sc' -- the logic is nearly identical.
"""
vis = 0
for node in sc.nodes:
if node.prompt:
vis = max(vis, expr_value(node.prompt[1]))
if isinstance(sc, Symbol) and sc.choice:
if sc.choice.orig_type == TRISTATE and sc.orig_type != TRISTATE and \
sc.choice.tri_value != 2:
# Non-tristate choice symbols are only visible in y mode
return 0
if sc.orig_type == TRISTATE and vis == 1 and sc.choice.tri_value == 2:
# Choice symbols with m visibility are not visible in y mode
return 0
# Promote m to y if we're dealing with a non-tristate (possibly due to
# modules being disabled)
if vis == 1 and sc.type != TRISTATE:
return 2
return vis
def _make_depend_on(sym, expr):
"""
Adds 'sym' as a dependency to all symbols in 'expr'. Constant symbols in
'expr' are skipped as they can never change value anyway.
"""
if not isinstance(expr, tuple):
if not expr.is_constant:
expr._dependents.add(sym)
elif expr[0] in (AND, OR):
_make_depend_on(sym, expr[1])
_make_depend_on(sym, expr[2])
elif expr[0] == NOT:
_make_depend_on(sym, expr[1])
elif expr[0] in _RELATIONS:
if not expr[1].is_constant:
expr[1]._dependents.add(sym)
if not expr[2].is_constant:
expr[2]._dependents.add(sym)
else:
_internal_error("Internal error while fetching symbols from an "
"expression with token stream {}.".format(expr))
def _format_and_op(expr):
"""
expr_str() helper. Returns the string representation of 'expr', which is
assumed to be an operand to AND, with parentheses added if needed.
"""
if isinstance(expr, tuple) and expr[0] == OR:
return "({})".format(expr_str(expr))
return expr_str(expr)
def _indentation(line):
"""
Returns the length of the line's leading whitespace, treating tab stops as
being spaced 8 characters apart.
"""
line = line.expandtabs()
return len(line) - len(line.lstrip())
def _deindent(line, indent):
"""
Deindents 'line' by 'indent' spaces.
"""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _strcmp(s1, s2):
"""
strcmp()-alike that returns -1, 0, or 1.
"""
return (s1 > s2) - (s1 < s2)
def _stderr_msg(msg, filename, linenr):
if filename is not None:
msg = "{}:{}: {}".format(filename, linenr, msg)
sys.stderr.write(msg + "\n")
def _internal_error(msg):
raise InternalError(
msg +
"\nSorry! You may want to send an email to ulfalizer a.t Google's "
"email service to tell me about this. Include the message above and "
"the stack trace and describe what you were doing.")
# Printing functions
def _sym_choice_str(sc):
"""
Symbol/choice __str__() implementation. These have many properties in
common, so it makes sense to handle them together.
"""
lines = []
def indent_add(s):
lines.append("\t" + s)
# We print the prompt(s) and help text(s) too as a convenience, even though
# they're actually part of the MenuNode. If a symbol or choice is defined
# in multiple locations (has more than one MenuNode), we output one
# statement for each location, and print all the properties that belong to
# the symbol/choice itself only at the first location. This gives output
# that would function if fed to a Kconfig parser, even for such
symbols/choices (choices defined in multiple locations get a bit iffy
# since they also have child nodes, though I've never seen such a choice).
if not sc.nodes:
return ""
for node in sc.nodes:
if isinstance(sc, Symbol):
if node.is_menuconfig:
lines.append("menuconfig " + sc.name)
else:
lines.append("config " + sc.name)
else:
if sc.name is None:
lines.append("choice")
else:
lines.append("choice " + sc.name)
if node is sc.nodes[0] and sc.orig_type != UNKNOWN:
indent_add(TYPE_TO_STR[sc.orig_type])
if node.prompt:
prompt, cond = node.prompt
prompt_str = 'prompt "{}"'.format(escape(prompt))
if cond is not sc.kconfig.y:
prompt_str += " if " + expr_str(cond)
indent_add(prompt_str)
if node is sc.nodes[0]:
if isinstance(sc, Symbol):
if sc.is_allnoconfig_y:
indent_add("option allnoconfig_y")
if sc is sc.kconfig.defconfig_list:
indent_add("option defconfig_list")
if sc.env_var is not None:
indent_add('option env="{}"'.format(sc.env_var))
if sc is sc.kconfig.modules:
indent_add("option modules")
if isinstance(sc, Symbol):
for low, high, cond in sc.ranges:
range_string = "range {} {}" \
.format(expr_str(low), expr_str(high))
if cond is not sc.kconfig.y:
range_string += " if " + expr_str(cond)
indent_add(range_string)
for default, cond in sc.defaults:
default_string = "default " + expr_str(default)
if cond is not sc.kconfig.y:
default_string += " if " + expr_str(cond)
indent_add(default_string)
if isinstance(sc, Choice) and sc.is_optional:
indent_add("optional")
if isinstance(sc, Symbol):
for select, cond in sc.selects:
select_string = "select " + select.name
if cond is not sc.kconfig.y:
select_string += " if " + expr_str(cond)
indent_add(select_string)
for imply, cond in sc.implies:
imply_string = "imply " + imply.name
if cond is not sc.kconfig.y:
imply_string += " if " + expr_str(cond)
indent_add(imply_string)
if node.help is not None:
indent_add("help")
for line in node.help.splitlines():
indent_add(" " + line)
# Add a blank line if there are more nodes to print
if node is not sc.nodes[-1]:
lines.append("")
return "\n".join(lines) + "\n"
# Menu manipulation
def _expr_depends_on(expr, sym):
"""
Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created. This also influences
which items inside choice statements are considered choice items.
"""
if not isinstance(expr, tuple):
return expr is sym
if expr[0] in (EQUAL, UNEQUAL):
# Check for one of the following:
# sym = m/y, m/y = sym, sym != n, n != sym
left, right = expr[1:]
if right is sym:
left, right = right, left
if left is not sym:
return False
return (expr[0] == EQUAL and right is sym.kconfig.m or \
right is sym.kconfig.y) or \
(expr[0] == UNEQUAL and right is sym.kconfig.n)
if expr[0] == AND:
return _expr_depends_on(expr[1], sym) or \
_expr_depends_on(expr[2], sym)
return False
def _has_auto_menu_dep(node1, node2):
"""
Returns True if node2 has an "automatic menu dependency" on node1. If node2
has a prompt, we check its condition. Otherwise, we look directly at
node2.dep.
"""
if node2.prompt:
return _expr_depends_on(node2.prompt[1], node1.item)
# If we have no prompt, use the menu node dependencies instead
return _expr_depends_on(node2.dep, node1.item)
def _check_auto_menu(node):
"""
Looks for menu nodes after 'node' that depend on it. Creates an implicit
menu rooted at 'node' with the nodes as the children if such nodes are
found. The recursive call to _finalize_tree() makes this work recursively.
"""
cur = node
while cur.next and _has_auto_menu_dep(node, cur.next):
_finalize_tree(cur.next)
cur = cur.next
cur.parent = node
if cur is not node:
node.list = node.next
node.next = cur.next
cur.next = None
def _flatten(node):
"""
"Flattens" menu nodes without prompts (e.g. 'if' nodes and non-visible
symbols with children from automatic menu creation) so that their children
appear after them instead. This gives a clean menu structure with no
unexpected "jumps" in the indentation.
"""
while node:
if node.list and (not node.prompt or node.prompt[0] == ""):
last_node = node.list
while 1:
last_node.parent = node.parent
if not last_node.next:
break
last_node = last_node.next
last_node.next = node.next
node.next = node.list
node.list = None
node = node.next
def _remove_ifs(node):
"""
Removes 'if' nodes (which can be recognized by MenuNode.item being None),
which are assumed to already have been flattened. The C implementation
doesn't bother to do this, but we expose the menu tree directly, and it
makes it nicer to work with.
"""
first = node.list
while first and first.item is None:
first = first.next
cur = first
while cur:
if cur.next and cur.next.item is None:
cur.next = cur.next.next
cur = cur.next
node.list = first
def _finalize_choice(node):
"""
Finalizes a choice, marking each symbol whose menu node has the choice as
the parent as a choice symbol, and automatically determining types if not
specified.
"""
choice = node.item
cur = node.list
while cur:
if isinstance(cur.item, Symbol):
cur.item.choice = choice
choice.syms.append(cur.item)
cur = cur.next
# If no type is specified for the choice, its type is that of
# the first choice item with a specified type
if choice.orig_type == UNKNOWN:
for item in choice.syms:
if item.orig_type != UNKNOWN:
choice.orig_type = item.orig_type
break
# Each choice item of UNKNOWN type gets the type of the choice
for sym in choice.syms:
if sym.orig_type == UNKNOWN:
sym.orig_type = choice.orig_type
def _finalize_tree(node):
"""
Creates implicit menus from dependencies (see kconfig-language.txt),
removes 'if' nodes, and finalizes choices. This pretty closely mirrors
menu_finalize() from the C implementation, though we propagate dependencies
during parsing instead.
"""
# The ordering here gets a bit tricky. It's important to do things in this
# order to have everything work out correctly.
if node.list:
# The menu node has children. Finalize them.
cur = node.list
while cur:
_finalize_tree(cur)
# Note: _finalize_tree() might have changed cur.next. This is
# expected, so that we jump over e.g. implicitly created submenus.
cur = cur.next
elif node.item is not None:
# The menu node has no children (yet). See if we can create an implicit
# menu rooted at it (due to menu nodes after it depending on it).
_check_auto_menu(node)
if node.list:
# We have a node with finalized children. Do final steps to finalize
# this node.
_flatten(node.list)
_remove_ifs(node)
# Empty choices (node.list None) are possible, so this needs to go outside
if isinstance(node.item, Choice):
_finalize_choice(node)
def _wordexp_expand(value):
"""
Return a list of expanded tokens, using roughly the same algorithm
as wordexp(3)
"""
ifs = os.environ.get("IFS", " \t\n")
value = os.path.expandvars(value).strip(ifs)
if len(ifs) > 0:
for i in ifs[1:]: # collapse all IFS delimiters
value = value.replace(i, ifs[0])
return value.split(ifs[0])
else:
return [value]
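# Example (hypothetical environment with HOME=/home/user and IFS unset):
#
#   _wordexp_expand("$HOME/src extra")  # -> ['/home/user/src', 'extra']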
#
# Public global constants
#
# Integers representing symbol types
(
BOOL,
HEX,
INT,
STRING,
TRISTATE,
UNKNOWN
) = range(6)
# Integers representing expression types
(
AND,
OR,
NOT,
EQUAL,
UNEQUAL,
LESS,
LESS_EQUAL,
GREATER,
GREATER_EQUAL,
) = range(9)
# Integers representing menu and comment menu nodes
(
MENU,
COMMENT,
) = range(2)
# Converts a symbol/choice type to a string
TYPE_TO_STR = {
UNKNOWN: "unknown",
BOOL: "bool",
TRISTATE: "tristate",
STRING: "string",
HEX: "hex",
INT: "int",
}
TRI_TO_STR = {
0: "n",
1: "m",
2: "y",
}
STR_TO_TRI = {
"n": 0,
"m": 1,
"y": 2,
}
#
# Internal global constants
#
# Tokens
(
_T_ALLNOCONFIG_Y,
_T_AND,
_T_BOOL,
_T_CHOICE,
_T_CLOSE_PAREN,
_T_COMMENT,
_T_CONFIG,
_T_DEFAULT,
_T_DEFCONFIG_LIST,
_T_DEF_BOOL,
_T_DEF_TRISTATE,
_T_DEPENDS,
_T_ENDCHOICE,
_T_ENDIF,
_T_ENDMENU,
_T_ENV,
_T_EQUAL,
_T_GREATER,
_T_GREATER_EQUAL,
_T_HELP,
_T_HEX,
_T_IF,
_T_IMPLY,
_T_INT,
_T_LESS,
_T_LESS_EQUAL,
_T_MAINMENU,
_T_MENU,
_T_MENUCONFIG,
_T_MODULES,
_T_NOT,
_T_ON,
_T_OPEN_PAREN,
_T_OPTION,
_T_OPTIONAL,
_T_OR,
_T_PROMPT,
_T_RANGE,
_T_SELECT,
_T_SOURCE,
_T_STRING,
_T_TRISTATE,
_T_UNEQUAL,
_T_VISIBLE,
) = range(44)
# Keyword to token map, with the get() method assigned directly as a small
# optimization
_get_keyword = {
"allnoconfig_y": _T_ALLNOCONFIG_Y,
"bool": _T_BOOL,
"boolean": _T_BOOL,
"choice": _T_CHOICE,
"comment": _T_COMMENT,
"config": _T_CONFIG,
"def_bool": _T_DEF_BOOL,
"def_tristate": _T_DEF_TRISTATE,
"default": _T_DEFAULT,
"defconfig_list": _T_DEFCONFIG_LIST,
"depends": _T_DEPENDS,
"endchoice": _T_ENDCHOICE,
"endif": _T_ENDIF,
"endmenu": _T_ENDMENU,
"env": _T_ENV,
"help": _T_HELP,
"hex": _T_HEX,
"if": _T_IF,
"imply": _T_IMPLY,
"int": _T_INT,
"mainmenu": _T_MAINMENU,
"menu": _T_MENU,
"menuconfig": _T_MENUCONFIG,
"modules": _T_MODULES,
"on": _T_ON,
"option": _T_OPTION,
"optional": _T_OPTIONAL,
"prompt": _T_PROMPT,
"range": _T_RANGE,
"select": _T_SELECT,
"source": _T_SOURCE,
"string": _T_STRING,
"tristate": _T_TRISTATE,
"visible": _T_VISIBLE,
}.get
# Tokens after which identifier-like lexemes are treated as strings. _T_CHOICE
# is included to avoid symbols being registered for named choices.
_STRING_LEX = frozenset((
_T_BOOL,
_T_CHOICE,
_T_COMMENT,
_T_HEX,
_T_INT,
_T_MAINMENU,
_T_MENU,
_T_PROMPT,
_T_SOURCE,
_T_STRING,
_T_TRISTATE,
))
# Tokens for types, excluding def_bool, def_tristate, etc., for quick
# checks during parsing
_TYPE_TOKENS = frozenset((
_T_BOOL,
_T_TRISTATE,
_T_INT,
_T_HEX,
_T_STRING,
))
# Note: This hack is no longer needed as of upstream commit c226456
# (kconfig: warn of unhandled characters in Kconfig commands). It
# is kept around for backwards compatibility.
#
# The initial word on a line is parsed specially. Let
# command_chars = [A-Za-z0-9_]. Then
# - leading non-command_chars characters are ignored, and
# - the first token consists of the following one or more
# command_chars characters.
# This is why things like "----help--" are accepted.
#
# In addition to the initial token, the regex also matches trailing whitespace
# so that we can jump straight to the next token (or to the end of the line if
# there's just a single token).
#
# As an optimization, this regex fails to match for lines containing just a
# comment.
_initial_token_re_match = re.compile(r"[^\w#]*(\w+)\s*").match
# Matches an identifier/keyword, also eating trailing whitespace
_id_keyword_re_match = re.compile(r"([\w./-]+)\s*").match
# Regular expression for finding $-references to symbols in strings
_sym_ref_re_search = re.compile(r"\$([A-Za-z0-9_]+)").search
# Matches a valid right-hand side for an assignment to a string symbol in a
# .config file, including escaped characters. Extracts the contents.
_conf_string_re_match = re.compile(r'"((?:[^\\"]|\\.)*)"').match
# Token to type mapping
_TOKEN_TO_TYPE = {
_T_BOOL: BOOL,
_T_DEF_BOOL: BOOL,
_T_DEF_TRISTATE: TRISTATE,
_T_HEX: HEX,
_T_INT: INT,
_T_STRING: STRING,
_T_TRISTATE: TRISTATE,
}
# Constant representing that there's no cached choice selection. This is
# distinct from a cached None (no selection). We create a unique object (any
# will do) for it so we can test with 'is'.
_NO_CACHED_SELECTION = object()
# Used in comparisons. 0 means the base is inferred from the format of the
# string. The entries for BOOL and TRISTATE are an implementation convenience:
# They should never convert to valid numbers.
_TYPE_TO_BASE = {
BOOL: 0,
HEX: 16,
INT: 10,
STRING: 0,
TRISTATE: 0,
UNKNOWN: 0,
}
_RELATIONS = frozenset((
EQUAL,
UNEQUAL,
LESS,
LESS_EQUAL,
GREATER,
GREATER_EQUAL,
))
# Token to relation (=, !=, <, ...) mapping
_TOKEN_TO_REL = {
_T_EQUAL: EQUAL,
_T_GREATER: GREATER,
_T_GREATER_EQUAL: GREATER_EQUAL,
_T_LESS: LESS,
_T_LESS_EQUAL: LESS_EQUAL,
_T_UNEQUAL: UNEQUAL,
}
_REL_TO_STR = {
EQUAL: "=",
GREATER: ">",
GREATER_EQUAL: ">=",
LESS: "<",
LESS_EQUAL: "<=",
UNEQUAL: "!=",
}
| apache-2.0 | 1,443,340,173,474,915,000 | 34.57265 | 107 | 0.56384 | false |
tow/sunburnt | sunburnt/walktree.py | 3 | 14187 | #!/usr/bin/env python
# -*-coding: utf8-*-
# Title: walktree.py
# Author: Gribouillis for the python forum at www.daniweb.com
# Created: 2011-11-18 23:28:39.608291 (isoformat date)
# License: Public Domain
# Use this code freely.
# IP: http://www.daniweb.com/software-development/python/code/395270
"""This module implements a generic depth first tree and graph traversal.
"""
from __future__ import print_function
from collections import deque, namedtuple
from functools import reduce
import operator
import sys
version_info = (1, 4)
version = ".".join(map(str, version_info))
__all__ = ["walk", "event", "event_repr",
"enter", "within", "exit", "leaf", "bounce", "cycle"]
class ConstSequence(object):
"Read-only wrapper around a sequence type instance"
def __init__(self, seq):
if isinstance(seq, ConstSequence):
seq = seq._adaptee
self._adaptee = seq
def __getitem__(self, key):
if isinstance(key, slice):
return ConstSequence(self._adaptee[key])
else:
return self._adaptee[key]
def __len__(self):
return len(self._adaptee)
def __contains__(self, key):
return key in self._adaptee
def __iter__(self):
return (x for x in self._adaptee)
def __reversed__(self):
return (x for x in reversed(self._adaptee))
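# Minimal sketch (plain Python): ConstSequence gives read-only access to a
# wrapped sequence.
#
#   cs = ConstSequence([1, 2, 3])
#   cs[0], len(cs), 2 in cs  # reads work as usual
#   cs[0] = 9                # raises TypeError: no __setitem__ is defined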
class _Int(int):
pass
_cs = _Int()
for _i, _line in enumerate("""
lnr: leaf non bounce
lr: leaf bounce
irnc: inner bounce non cycle
ie: inner enter
iw: inner within
ix: inner exit
ic: inner bounce cycle
""".strip().splitlines()):
_name = _line.lstrip().split(":")[0]
setattr(_cs, _name, 1 << _i)
_NamedEvent = namedtuple("_NamedEvent", "name value")
def _event_items():
yield "leaf", _cs.lnr | _cs.lr
yield "inner", _cs.irnc | _cs.ie | _cs.iw | _cs.ix | _cs.ic
yield "enter", _cs.ie
yield "within", _cs.iw
yield "exit", _cs.ix
yield "bounce", _cs.lr | _cs.irnc | _cs.ic
yield "cycle", _cs.ic
_named_events = tuple(_NamedEvent(*pair) for pair in _event_items())
globals().update(dict(_named_events))
_event_names = tuple(e.name for e in _named_events)
def _test_events():
for i, t in enumerate((
_cs.lnr == (leaf & ~bounce),
_cs.lr == (leaf & bounce),
0 == (leaf & inner),
_cs.irnc == (inner & bounce & ~cycle),
(_cs.ie == enter) and (_cs.ie == (inner & enter)),
(_cs.iw == within) and (within == (inner & within)),
(_cs.ix == exit) and (exit == (inner & exit)),
(_cs.ic == cycle) and (cycle == (inner & cycle)),
(cycle & bounce) == cycle,
(cycle | bounce) == bounce,
)):
assert t, i
_enter, _within, _exit, _cycle, _pop = (
_Int(enter), _Int(within), _Int(exit), _Int(cycle), _Int(1 << 15))
def parse_event_arg(events):
if isinstance(events, int):
events = (events,)
events = event(reduce(operator.or_, events))
selector = [_pop, None, '', None, '', None]
for i, ev in ((1, _exit),(3, _within),(5, _enter)):
if ev & events:
selector[i] = ev
selector = list(item for item in selector if item is not None)
mask = event(events)
return mask, selector
def event(n):
"""Keep only the lowest byte of an integer.
This function is useful because bitwise operations in python
yield integers out of the range(128), which represents walk events."""
return n & 127
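# For example, event(enter | leaf) masks the combination down to the valid
# event range, giving a preorder-style selection (see walk() below).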
if sys.version_info < (3,):
def bytes(x, **args):
return x
def event_repr(_event_names):
import base64, re, zlib
s = """eNpVklEOwyAMQ2+D2r8CaX+4CyeJOPtsJ3SbtIYM8jDEXKWOq6wbAd+o5S7rGXXe
E4PyyzW0cVzeziz1hvmG8vWU1cuyWJ1RGoTmmXQpeBeIiA9gy9UDZAd5qjTRvdhQyyxFRbf
gA66+SO4/bx7RQtlEI+IL5b6VbSvbV7mrhOKmS2xxk7i2EI/ZGRlmv3fmLUwbBdgF9lc7wc
zWTiNWUvjBAUBMdpnXnzui/Bk5r/0YnTgwoIRvHCtLWhZpVKzh4Txg1knHwi4cPZGeiEbF9
GykX/QqjKJLHi3nOXAjNtafM8wKVLc311vjJFhD01PNUk2jYvo00iP6E+ao2er0Qbkz9frW
S7i/byMIXpDGuDr9hzamWPD9MlUhWgSFdWbBavXMDdBzmTSqBmff6wdNK+td"""
s = zlib.decompress(base64.b64decode(bytes(s, encoding="ascii"))).decode("ascii")
s = re.sub(r"\d", (lambda mo: _event_names[int(mo.group(0))]), s)
s = re.sub(r"([|&^])", r" \1 ", s)
s = tuple("event(%s)" % x for x in s.split(";"))
def event_repr(n):
"""return a human readable, and evaluable representation of an event
@ event: an integer (modulo 128)
"""
return s[n & 127]
return event_repr
event_repr = event_repr(_event_names) # overwrite event_repr()
class _MockDict(object):
"Helper class for walk() in the tree mode"
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __contains__(self, key):
pass
def walk(node, gen_subnodes, event = enter, reverse_path = False, tree=True):
"""Traverse a tree or a graph based at 'node' and generate a sequence
of paths in the graph from the initial node to the visited node.
The arguments are
@ node : an arbitrary python object used as root node.
@ gen_subnodes : a function defining the graph structure. It must
have the interface gen_subnodes(node) --> iterable containing
other nodes. This function will be called with the initial
node and the descendent nodes that it generates through
this function.
@ event: an integral value specifying which paths will be generated
during the depth-first walk. This is usually a value obtained
by composing the walk events (see below) with bitwise operators.
For example, passing event = event(enter|leaf|bounce) will
generate paths for inner nodes the first time they are entered, for
leaf nodes, and for every node each time it is revisited during
the walk.
@ reverse_path: a boolean indicating that the path should be read
from right to left (defaults to False).
@ tree: a boolean indicating that the walked graph is a tree,
which means that applying gen_subnodes() will only generate
new nodes (defaults to True). Passing True if the graph
is not a tree will walk multiple subgraphs several times,
or lead to an infinite walk and a memory error if the graph
contains cycles. When a False value is given, this function
stores all the previously visited nodes during the walk.
When a True value is given, only the nodes in the current
path are stored.
Typical use:
for path in walk(node, func, event(enter|leaf)):
# this choice of events results in a preorder traversal
visited = path[-1]
if path.event & leaf:
print(visited, 'is a leaf node!')
The generated 'path' is a read-only sequence of nodes with path[0] being
the base node of the walk and path[-1] being the visited node. If
reverse_path is set to True, the path will appear from right to left,
with the visited node in position 0. During the whole walk, the function
generates the same path object, each time in a different state.
Internally, this path is implemented using a collections.deque object,
which means that indexing an element in the middle of the path (but not
near both ends) may require a time proportional to its length.
The generated paths have an attribute path.event which value is an
integer in the range [0,128[ representing a bitwise combination of
the base events (which are also integers) explained below
enter: the currently visited node is an inner node of the tree
generated before this node's subgraph is visited.
within: the currently visited node is an inner node generated after
its first subgraph has been visited but before the other
subgraphs.
exit: the currently visited node is an inner node generated after
all its subgraphs have been visited.
leaf: the currently visited node is a leaf node.
inner: the currently visited node is an inner node
cycle: the currently visited node is an internal node already on
the path, which means that the graph has a cycle. The subgraph
based on this node will not be walked.
bounce: the currently visited node is either an internal node whose
subgraph has already been walked, or a leaf already met.
Subgraphs are never walked twice with the argument tree=False.
The actual events generated are often a combination of these events, for
example, one may have a value of event(leaf & ~bounce). This attribute
path.event is best tested with bitwise operators. For example to test if
the walk is on a leaf, use 'if path.event & leaf:'.
The constant events are also attributes of the walk function, namely
(walk.enter, walk.within, ...)
"""
mask, selector = parse_event_arg(event)
isub = selector.index('', 1)
ileft = selector.index('', isub + 1)
tcycle = mask & cycle
tleaf = mask & leaf
tibounce = mask & bounce & inner
tfbounce = mask & bounce & leaf
tffirst = mask & ~bounce & leaf
todo = deque((iter((node,)),))
path = deque()
const_path = ConstSequence(path)
if reverse_path:
ppush, ppop, ivisited = path.appendleft, path.popleft, 0
else:
ppush, ppop, ivisited = path.append, path.pop, -1
less, more = todo.pop, todo.extend
hist = _MockDict() if tree else dict()
try:
while True:
sequence = todo[-1]
if sequence.__class__ is _Int:
less()
if sequence is _pop:
# this node's subtree is exhausted, prepare for bounce
hist[path[ivisited]] = tibounce
ppop()
else:
const_path.event = sequence
yield const_path
else:
try:
node = next(sequence)
except StopIteration:
less()
else:
ppush(node)
# if node in history, generate a bounce event
# (actually one of (leaf & bounce, inner & bounce, cycle))
if node in hist:
const_path.event = hist[node]
if const_path.event:
yield const_path
ppop()
else:
sub = iter(gen_subnodes(node))
try:
snode = next(sub)
except StopIteration:
hist[node] = tfbounce
if tleaf:
const_path.event = tffirst
yield const_path
ppop()
else:
# add the node to the visit history
hist[node] = tcycle
selector[ileft] = iter((snode,))
selector[isub] = sub
more(selector)
except IndexError:
if todo: # this allows gen_subnodes() to raise IndexError
raise
for _e in _named_events:
setattr(walk, _e.name, _e.value)
if __name__ == "__main__":
def _graph_example(n=4):
from string import ascii_uppercase as labels
from random import Random
n = min(n, 26)
class Node(object):
def __init__(self, letter):
self.letter = str(letter)
self.neigh = list()
def __str__(self):
return self.letter
__repr__ = __str__
# create a reproducible random graph
nodes = [Node(x) for x in labels[:n]]
ran = Random()
ran.seed(6164554331563)
neighmax = 3
for n in nodes:
n.neigh[:] = sorted((x for x in ran.sample(nodes, neighmax)
if x is not n), key=lambda n: n.letter)
#for n in nodes:
# print(n, ":", list(n.neigh))
for path in walk(nodes[0], (lambda n: n.neigh), event(~0), tree=False):
print(list(path), "{0:<7}".format(event_repr(path.event)))
def _tree_example():
# an example tree
root = (
((1,2), (4,5), 6),
(7, 9),
)
# a function to generates subnodes for this tree
def subn(node):
return node if isinstance(node, tuple) else ()
# use of the walk() generator to traverse the tree
for path in walk(root, subn, event(enter|exit|leaf)):
print(list(path), "{0:<7}".format(event_repr(path.event)))
_graph_example(7)
#_tree_example()
""" example code output --->
# this example shows all the possible walk events for the graph shown
# in the attached image when starting from node A
[A] event(enter)
[A, B] event(enter)
[A, B, C] event(enter)
[A, B, C, D] event(enter)
[A, B, C, D, B] event(cycle)
[A, B, C, D] event(within)
[A, B, C, D, F] event(enter)
[A, B, C, D, F, C] event(cycle)
[A, B, C, D, F] event(within)
[A, B, C, D, F, G] event(enter)
[A, B, C, D, F, G, B] event(cycle)
[A, B, C, D, F, G] event(within)
[A, B, C, D, F, G, D] event(cycle)
[A, B, C, D, F, G, E] event(enter)
[A, B, C, D, F, G, E, C] event(cycle)
[A, B, C, D, F, G, E] event(within)
[A, B, C, D, F, G, E, D] event(cycle)
[A, B, C, D, F, G, E, G] event(cycle)
[A, B, C, D, F, G, E] event(exit)
[A, B, C, D, F, G] event(exit)
[A, B, C, D, F] event(exit)
[A, B, C, D] event(exit)
[A, B, C] event(within)
[A, B, C, G] event(inner & bounce)
[A, B, C] event(exit)
[A, B] event(within)
[A, B, E] event(inner & bounce)
[A, B, G] event(inner & bounce)
[A, B] event(exit)
[A] event(within)
[A, C] event(inner & bounce)
[A, G] event(inner & bounce)
[A] event(exit)
"""
| mit | 8,880,664,391,554,048,000 | 38.739496 | 80 | 0.578769 | false |
jimmykimani/Cp2-Bucketlist-API | manage.py | 1 | 2244 |
import os
import unittest
import coverage
COV = coverage.coverage(
branch=True,
omit=[
'*/*bc/*',
'manage.py/*',
'app/errors.py',
'tests/base.py',
'tests/test_authentication.py',
'tests/test_endpoints.py',
'instance/config.py',
'/*/__init__.py'
]
)
COV.start()
from flask_script import Manager, prompt_bool, Shell
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, Bucketlist, Item
# create the app
app = create_app(config_name=os.getenv('APP_SETTINGS') or 'default')
migrate = Migrate(app, db)
manager = Manager(app)
@manager.command
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
tests = unittest.TestLoader().discover('tests')
result = unittest.TextTestRunner(verbosity=1).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return 1
# manually create_db
@manager.command
def create_db():
"""Creates database tables from models"""
db.create_all()
print('Initialized....!')
# manually drop db
@manager.command
def drop_db():
"""Drops database tables"""
if prompt_bool("Are you sure you want to lose all your data?"):
db.drop_all()
print('Db dropped....!')
def make_shell_context():
return dict(User=User,
Bucketlist=Bucketlist,
Item=Item)
# Allows us to access the shell with the context defined above
# Allows us to make migrations using the db command
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
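# Typical invocations from a shell (assuming this file is named manage.py):
#   python manage.py create_db    # create the database tables
#   python manage.py test         # run the unit tests
#   python manage.py cov          # run the tests with a coverage report
#   python manage.py db migrate   # generate a schema migration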
if __name__ == '__main__':
manager.run()
| mit | 7,057,149,605,993,123,000 | 24.5 | 71 | 0.63369 | false |
Reinaesaya/OUIRL-ChatBot | chatterbot/input/microsoft.py | 4 | 3644 | from __future__ import unicode_literals
from time import sleep
from chatterbot.input import InputAdapter
from chatterbot.conversation import Statement
class Microsoft(InputAdapter):
"""
An input adapter that allows a ChatterBot instance to get
input statements from a Microsoft Bot using the *Direct Line client protocol*.
https://docs.botframework.com/en-us/restapi/directline/#navtitle
"""
def __init__(self, **kwargs):
super(Microsoft, self).__init__(**kwargs)
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
self.directline_host = kwargs.get('directline_host', 'https://directline.botframework.com')
# NOTE: Direct Line client credentials are different from your bot's
# credentials
self.direct_line_token_or_secret = kwargs.\
get('direct_line_token_or_secret')
authorization_header = 'BotConnector {}'.\
format(self.direct_line_token_or_secret)
self.headers = {
'Authorization': authorization_header,
'Content-Type': 'application/json',
'Accept': 'application/json',
'charset': 'utf-8'
}
conversation_data = self.start_conversation()
self.conversation_id = conversation_data.get('conversationId')
self.conversation_token = conversation_data.get('token')
def _validate_status_code(self, response):
code = response.status_code
if not code == 200:
raise self.HTTPStatusException('{} status code received'.
format(code))
def start_conversation(self):
import requests
endpoint = '{host}/api/conversations'.format(host=self.directline_host)
response = requests.post(
endpoint,
headers=self.headers,
verify=False
)
self.logger.info('{} starting conversation {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def get_most_recent_message(self):
import requests
endpoint = '{host}/api/conversations/{id}/messages'\
.format(host=self.directline_host,
id=self.conversation_id)
response = requests.get(
endpoint,
headers=self.headers,
verify=False
)
self.logger.info('{} retrieving most recent messages {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
data = response.json()
if data['messages']:
last_msg = int(data['watermark'])
return data['messages'][last_msg - 1]
return None
def process_input(self, statement):
new_message = False
data = None
while not new_message:
data = self.get_most_recent_message()
if data and data['id']:
new_message = True
else:
pass
sleep(3.5)
text = data['text']
statement = Statement(text)
self.logger.info('processing user statement {}'.format(statement))
return statement
class HTTPStatusException(Exception):
"""
Exception raised when unexpected non-success HTTP
status codes are returned in a response.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
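# Usage sketch (the secret is a placeholder; a reachable Direct Line
# endpoint is required):
#
#   adapter = Microsoft(direct_line_token_or_secret='<DIRECT_LINE_SECRET>')
#   statement = adapter.process_input(None)  # polls until a message arrives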
| bsd-3-clause | 2,350,987,387,894,654,000 | 30.686957 | 99 | 0.594951 | false |
indexofire/gork | src/gork/application/mlst/migrations/0008_auto__add_field_sttype_locus_data__add_field_sttype_creat_time__add_fi.py | 1 | 15133 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'STType.locus_data'
db.add_column(u'mlst_sttype', 'locus_data',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'STType.creat_time'
db.add_column(u'mlst_sttype', 'creat_time',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 5, 10, 0, 0), blank=True),
keep_default=False)
# Adding field 'STType.lastedit_time'
db.add_column(u'mlst_sttype', 'lastedit_time',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 5, 10, 0, 0), blank=True),
keep_default=False)
# Removing M2M table for field locus on 'STType'
db.delete_table('mlst_sttype_locus')
# Deleting field 'ExperimentData.seq'
db.delete_column(u'mlst_experimentdata', 'seq')
# Adding field 'ExperimentData.sequence'
db.add_column(u'mlst_experimentdata', 'sequence',
self.gf('django.db.models.fields.TextField')(default='', unique=True, blank=True),
keep_default=False)
# Adding field 'ExperimentData.lastedit_time'
db.add_column(u'mlst_experimentdata', 'lastedit_time',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 5, 10, 0, 0), blank=True),
keep_default=False)
# Adding M2M table for field sttype on 'ExperimentData'
db.create_table(u'mlst_experimentdata_sttype', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('experimentdata', models.ForeignKey(orm[u'mlst.experimentdata'], null=False)),
('sttype', models.ForeignKey(orm[u'mlst.sttype'], null=False))
))
db.create_unique(u'mlst_experimentdata_sttype', ['experimentdata_id', 'sttype_id'])
# Changing field 'ExperimentData.creat_time'
db.alter_column(u'mlst_experimentdata', 'creat_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
# Adding unique constraint on 'ExperimentData', fields ['value']
db.create_unique(u'mlst_experimentdata', ['value'])
def backwards(self, orm):
# Removing unique constraint on 'ExperimentData', fields ['value']
db.delete_unique(u'mlst_experimentdata', ['value'])
# Deleting field 'STType.locus_data'
db.delete_column(u'mlst_sttype', 'locus_data')
# Deleting field 'STType.creat_time'
db.delete_column(u'mlst_sttype', 'creat_time')
# Deleting field 'STType.lastedit_time'
db.delete_column(u'mlst_sttype', 'lastedit_time')
# Adding M2M table for field locus on 'STType'
db.create_table(u'mlst_sttype_locus', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('sttype', models.ForeignKey(orm[u'mlst.sttype'], null=False)),
('locus', models.ForeignKey(orm[u'mlst.locus'], null=False))
))
db.create_unique(u'mlst_sttype_locus', ['sttype_id', 'locus_id'])
# Adding field 'ExperimentData.seq'
db.add_column(u'mlst_experimentdata', 'seq',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
# Deleting field 'ExperimentData.sequence'
db.delete_column(u'mlst_experimentdata', 'sequence')
# Deleting field 'ExperimentData.lastedit_time'
db.delete_column(u'mlst_experimentdata', 'lastedit_time')
# Removing M2M table for field sttype on 'ExperimentData'
db.delete_table('mlst_experimentdata_sttype')
# Changing field 'ExperimentData.creat_time'
db.alter_column(u'mlst_experimentdata', 'creat_time', self.gf('django.db.models.fields.DateTimeField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gauth.guser': {
'Meta': {'ordering': "['-date_joined']", 'object_name': 'GUser'},
'about_me': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'about_me_html': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'avatar': ('imagekit.models.fields.ProcessedImageField', [], {'default': "'/media/avatars/default.png'", 'max_length': '100'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'bronze_badges': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_visited': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3', 'blank': 'True'}),
'gold_badges': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'new_messages': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'qa_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'scholar': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'silver_badges': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'})
},
u'mlst.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'creat_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creator'", 'to': "orm['gauth.GUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'lastedit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'moderator': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderator'", 'symmetrical': 'False', 'to': "orm['gauth.GUser']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'remote_uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'scheme': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mlst.Taxon']"})
},
u'mlst.experimentdata': {
'Meta': {'ordering': "['creat_time']", 'object_name': 'ExperimentData'},
'creat_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastedit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'locus': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mlst.Locus']", 'unique': 'True'}),
'raw': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'sequence': ('django.db.models.fields.TextField', [], {'unique': 'True', 'blank': 'True'}),
'sttype': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['mlst.STType']", 'symmetrical': 'False'}),
'value': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'unique': 'True'})
},
u'mlst.locus': {
'Meta': {'ordering': "['name']", 'object_name': 'Locus'},
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mlst.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'lastedit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'remote_uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'mlst.strain': {
'Meta': {'ordering': "['sttype']", 'object_name': 'Strain'},
'cc': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isolate_country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'isolate_source': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'max_length': '3'}),
'isolate_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'serotype': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serotype_formula': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'strain_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'strain_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sttype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'strain_type'", 'to': u"orm['mlst.STType']"}),
'submit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'submittor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submittor'", 'to': "orm['gauth.GUser']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mlst.Taxon']"})
},
u'mlst.sttype': {
'Meta': {'ordering': "['value']", 'unique_together': "(('dataset', 'value'),)", 'object_name': 'STType'},
'creat_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'type_dataset'", 'to': u"orm['mlst.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastedit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'locus_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'unique': 'True'})
},
u'mlst.taxon': {
'Meta': {'ordering': "['name']", 'object_name': 'Taxon'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
}
}
complete_apps = ['mlst'] | mit | 4,052,530,125,085,207,600 | 68.422018 | 187 | 0.564726 | false |
DMS-Aus/Roam | ext_libs/pdoc/__init__.py | 1 | 43827 | """
Module pdoc provides types and functions for accessing the public
documentation of a Python module. This includes modules (and
sub-modules), functions, classes and module, class and instance
variables. Docstrings are taken from modules, functions and classes
using the special `__doc__` attribute. Docstrings for variables are
extracted by examining the module's abstract syntax tree.
The public interface of a module is determined through one of two
ways. If `__all__` is defined in the module, then all identifiers in
that list will be considered public. No other identifiers will be
considered as public. Conversely, if `__all__` is not defined, then
`pdoc` will heuristically determine the public interface. There are
three rules that are applied to each identifier in the module:
1. If the name starts with an underscore, it is **not** public.
2. If the name is defined in a different module, it is **not** public.
3. If the name refers to an immediate sub-module, then it is public.
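For example, a hypothetical module can make its public interface explicit
(the names below are purely illustrative):
    #!python
    __all__ = ['parse', 'Parser']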
Once documentation for a module is created with `pdoc.Module`, it
can be output as either HTML or plain text using the convenience
functions `pdoc.html` and `pdoc.text`, or the corresponding methods
`pdoc.Module.html` and `pdoc.Module.text`.
Alternatively, you may run an HTTP server with the `pdoc` script
included with this module.
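As a minimal sketch of the convenience functions (the target module name
is only an example; any importable module works):
    #!python
    import pdoc
    print(pdoc.text('json'))      # plain text documentation
    page = pdoc.html('json')      # self-contained HTML as a string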
Compatibility
-------------
`pdoc` has been tested on Python 2.6, 2.7 and 3.3. It seems to work
on all three.
Contributing
------------
`pdoc` [is on GitHub](https://github.com/BurntSushi/pdoc). Pull
requests and bug reports are welcome.
Linking to other identifiers
----------------------------
In your documentation, you may link to other identifiers in
your module or submodules. Linking is automatically done for
you whenever you surround an identifier with a back quote
(grave). The identifier name must be fully qualified. For
example, <code>\`pdoc.Doc.docstring\`</code> is correct while
<code>\`Doc.docstring\`</code> is incorrect.
If the `pdoc` script is used to run an HTTP server, then external
linking to other packages installed is possible. No extra work is
necessary; simply use the fully qualified path. For example,
<code>\`nflvid.slice\`</code> will create a link to the `nflvid.slice`
function, which is **not** a part of `pdoc` at all.
Where does pdoc get documentation from?
---------------------------------------
Broadly speaking, `pdoc` gets everything you see from introspecting the
module. This includes words describing a particular module, class,
function or variable. While `pdoc` does some analysis on the source
code of a module, importing the module itself is necessary to use
Python's introspection features.
In Python, objects like modules, functions, classes and methods have
a special attribute named `__doc__` which contains that object's
*docstring*. The docstring comes from a special placement of a string
in your source code. For example, the following code shows how to
define a function with a docstring and access the contents of that
docstring:
#!python
>>> def test():
... '''This is a docstring.'''
... pass
...
>>> test.__doc__
'This is a docstring.'
Something similar can be done for classes and modules too. For classes,
the docstring should come on the line immediately following `class
...`. For modules, the docstring should start on the first line of
the file. These docstrings are what you see for each module, class,
function and method listed in the documentation produced by `pdoc`.
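For a class, that placement looks like this (a toy example):
    #!python
    class Example(object):
        '''Docstring for the Example class.'''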
The above just about covers *standard* uses of docstrings in Python.
`pdoc` extends the above in a few important ways.
### Special docstring conventions used by `pdoc`
**Firstly**, docstrings can be inherited. Consider the following code
sample:
#!python
>>> class A (object):
... def test():
... '''Docstring for A.'''
...
>>> class B (A):
... def test():
... pass
...
>>> print(A.test.__doc__)
Docstring for A.
>>> print(B.test.__doc__)
None
In Python, the docstring for `B.test` is empty, even though one was
defined in `A.test`. If `pdoc` generates documentation for the above
code, then it will automatically attach the docstring for `A.test` to
`B.test` only if `B.test` does not have a docstring. In the default
HTML output, an inherited docstring is grey.
**Secondly**, docstrings can be attached to variables, which includes
module (or global) variables, class variables and instance variables.
Python by itself [does not allow docstrings to be attached to
variables](http://www.python.org/dev/peps/pep-0224). For example:
#!python
variable = "SomeValue"
'''Docstring for variable.'''
The resulting `variable` will have no `__doc__` attribute. To
compensate, `pdoc` will read the source code when it's available to
infer a connection between a variable and a docstring. The connection
is only made when an assignment statement is followed by a docstring.
Something similar is done for instance variables as well. By
convention, instance variables are initialized in a class's `__init__`
method. Therefore, `pdoc` adheres to that convention and looks for
docstrings of variables like so:
#!python
def __init__(self):
self.variable = "SomeValue"
'''Docstring for instance variable.'''
Note that `pdoc` only considers attributes defined on `self` as
instance variables.
Class and instance variables can also have inherited docstrings.
**Thirdly and finally**, docstrings can be overridden with a special
`__pdoc__` dictionary that `pdoc` inspects if it exists. The keys of
`__pdoc__` should be identifiers within the scope of the module. (In
the case of an instance variable `self.variable` for class `A`, its
module identifier would be `A.variable`.) The values of `__pdoc__`
should be docstrings.
This particular feature is useful when there's no feasible way of
attaching a docstring to something. A good example of this is a
[namedtuple](http://goo.gl/akfXJ9):
#!python
__pdoc__ = {}
Table = namedtuple('Table', ['types', 'names', 'rows'])
__pdoc__['Table.types'] = 'Types for each column in the table.'
__pdoc__['Table.names'] = 'The names of each column in the table.'
__pdoc__['Table.rows'] = 'Lists corresponding to each row in the table.'
`pdoc` will then show `Table` as a class with documentation for the
`types`, `names` and `rows` members.
Note that assignments to `__pdoc__` need to be placed where they'll be
executed when the module is imported. For example, at the top level
of a module or in the definition of a class.
If `__pdoc__[key] = None`, then `key` will not be included in the
public interface of the module.
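For example, to hide a member entirely (the identifier is hypothetical):
    #!python
    __pdoc__['Table.legacy_rows'] = None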
License
-------
`pdoc` is in the public domain via the
[UNLICENSE](http://unlicense.org).
"""
from __future__ import absolute_import, division, print_function
import ast
import imp
import inspect
import os
import os.path as path
import pkgutil
import re
import sys
from mako.lookup import TemplateLookup
from mako.exceptions import TopLevelLookupException
__version__ = '0.3.2'
"""
The current version of pdoc. This value is read from `setup.py`.
"""
html_module_suffix = '.m.html'
"""
The suffix to use for module HTML files. By default, this is set to
`.m.html`, where the extra `.m` is used to differentiate a package's
`index.html` from a submodule called `index`.
"""
html_package_name = 'index.html'
"""
The file name to use for a package's `__init__.py` module.
"""
import_path = sys.path[:]
"""
A list of paths to restrict imports to. Any module that cannot be
found in `import_path` will not be imported. By default, it is set to a
copy of `sys.path` at initialization.
"""
_template_path = [
path.join(path.dirname(__file__), 'templates'),
]
"""
A list of paths to search for Mako templates used to produce the
plain text and HTML output. Each path is tried until a template is
found.
"""
if os.getenv('XDG_CONFIG_HOME'):
_template_path.insert(0, path.join(os.getenv('XDG_CONFIG_HOME'), 'pdoc'))
__pdoc__ = {}
tpl_lookup = TemplateLookup(directories=_template_path,
cache_args={'cached': True,
'cache_type': 'memory'})
"""
A `mako.lookup.TemplateLookup` object that knows how to load templates
from the file system. You may add additional paths by modifying the
object's `directories` attribute.
"""
def html(module_name, docfilter=None, allsubmodules=False,
external_links=False, link_prefix='', source=True):
"""
Returns the documentation for the module `module_name` in HTML
format. The module must be importable.
`docfilter` is an optional predicate that controls which
documentation objects are shown in the output. It is a single
argument function that takes a documentation object and returns
`True` or `False`. If `False`, that object will not be included in
the output.
If `allsubmodules` is `True`, then every submodule of this module
that can be found will be included in the documentation, regardless
of whether `__all__` contains it.
If `external_links` is `True`, then identifiers to external modules
are always turned into links.
If `link_prefix` is `True`, then all links will have that prefix.
Otherwise, links are always relative.
If `source` is `True`, then source code will be retrieved for
every Python object whenever possible. This can dramatically
decrease performance when documenting large modules.
"""
mod = Module(import_module(module_name),
docfilter=docfilter,
allsubmodules=allsubmodules)
return mod.html(external_links=external_links,
link_prefix=link_prefix, source=source)
def text(module_name, docfilter=None, allsubmodules=False):
"""
Returns the documentation for the module `module_name` in plain
text format. The module must be importable.
`docfilter` is an optional predicate that controls which
documentation objects are shown in the output. It is a single
argument function that takes a documentation object and returns
    `True` or `False`. If `False`, that object will not be included in the
output.
If `allsubmodules` is `True`, then every submodule of this module
that can be found will be included in the documentation, regardless
of whether `__all__` contains it.
"""
mod = Module(import_module(module_name),
docfilter=docfilter,
allsubmodules=allsubmodules)
return mod.text()
def import_module(module_name):
"""
Imports a module. A single point of truth for importing modules to
be documented by `pdoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pdoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated.
"""
if import_path != sys.path:
# Such a kludge. Only restrict imports if the `import_path` has
# been changed. We don't want to always restrict imports, since
# providing a path to `imp.find_module` stops it from searching
# in special locations for built ins or frozen modules.
#
# The problem here is that this relies on the `sys.path` not being
# independently changed since the initialization of this module.
# If it is changed, then some packages may fail.
#
# Any other options available?
# Raises an exception if the parent module cannot be imported.
# This hopefully ensures that we only explicitly import modules
# contained in `pdoc.import_path`.
imp.find_module(module_name.split('.')[0], import_path)
if module_name in sys.modules:
return sys.modules[module_name]
else:
__import__(module_name)
return sys.modules[module_name]
def _source(obj):
"""
Returns the source code of the Python object `obj` as a list of
lines. This tries to extract the source from the special
`__wrapped__` attribute if it exists. Otherwise, it falls back
to `inspect.getsourcelines`.
If neither works, then the empty list is returned.
"""
try:
return inspect.getsourcelines(obj.__wrapped__)[0]
except:
pass
try:
return inspect.getsourcelines(obj)[0]
except:
return []
def _get_tpl(name):
"""
Returns the Mako template with the given name. If the template
cannot be found, a nicer error message is displayed.
"""
try:
t = tpl_lookup.get_template(name)
except TopLevelLookupException:
locs = [path.join(p, name.lstrip('/')) for p in _template_path]
raise IOError(2, 'No template at any of: %s' % ', '.join(locs))
return t
def _eprint(*args, **kwargs):
"""Print to stderr."""
kwargs['file'] = sys.stderr
print(*args, **kwargs)
def _safe_import(module_name):
"""
A function for safely importing `module_name`, where errors are
suppressed and `stdout` and `stderr` are redirected to a null
device. The obligation is on the caller to close `stdin` in order
to avoid impolite modules from blocking on `stdin` when imported.
"""
class _Null (object):
def write(self, *_):
pass
sout, serr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = _Null(), _Null()
try:
m = import_module(module_name)
except:
m = None
sys.stdout, sys.stderr = sout, serr
return m
def _var_docstrings(tree, module, cls=None, init=False):
"""
Extracts variable docstrings given `tree` as the abstract syntax,
`module` as a `pdoc.Module` containing `tree` and an option `cls`
as a `pdoc.Class` corresponding to the tree. In particular, `cls`
should be specified when extracting docstrings from a class or an
`__init__` method. Finally, `init` should be `True` when searching
the AST of an `__init__` method so that `_var_docstrings` will only
accept variables starting with `self.` as instance variables.
A dictionary mapping variable name to a `pdoc.Variable` object is
returned.
"""
vs = {}
children = list(ast.iter_child_nodes(tree))
for i, child in enumerate(children):
if isinstance(child, ast.Assign) and len(child.targets) == 1:
if not init and isinstance(child.targets[0], ast.Name):
name = child.targets[0].id
elif (isinstance(child.targets[0], ast.Attribute)
and isinstance(child.targets[0].value, ast.Name)
and child.targets[0].value.id == 'self'):
name = child.targets[0].attr
else:
continue
if not _is_exported(name) \
and name not in getattr(module, '__all__', []):
continue
docstring = ''
if (i+1 < len(children)
and isinstance(children[i+1], ast.Expr)
and isinstance(children[i+1].value, ast.Str)):
docstring = children[i+1].value.s
vs[name] = Variable(name, module, docstring, cls=cls)
return vs
def _is_exported(ident_name):
"""
Returns `True` if `ident_name` matches the export criteria for an
identifier name.
This should not be used by clients. Instead, use
`pdoc.Module.is_public`.
"""
return not ident_name.startswith('_')
class Doc (object):
"""
A base class for all documentation objects.
A documentation object corresponds to *something* in a Python module
that has a docstring associated with it. Typically, this only includes
modules, classes, functions and methods. However, `pdoc` adds support
for extracting docstrings from the abstract syntax tree, which means
that variables (module, class or instance) are supported too.
A special type of documentation object `pdoc.External` is used to
represent identifiers that are not part of the public interface of
a module. (The name "External" is a bit of a misnomer, since it can
also correspond to unexported members of the module, particularly in
a class's ancestor list.)
"""
def __init__(self, name, module, docstring):
"""
Initializes a documentation object, where `name` is the public
identifier name, `module` is a `pdoc.Module` object, and
`docstring` is a string containing the docstring for `name`.
"""
self.module = module
"""
The module documentation object that this object was defined
in.
"""
self.name = name
"""
The identifier name for this object.
"""
self.docstring = inspect.cleandoc(docstring or '')
"""
The docstring for this object. It has already been cleaned
by `inspect.cleandoc`.
"""
@property
def source(self):
"""
Returns the source code of the Python object `obj` as a list of
lines. This tries to extract the source from the special
`__wrapped__` attribute if it exists. Otherwise, it falls back
to `inspect.getsourcelines`.
If neither works, then the empty list is returned.
"""
assert False, 'subclass responsibility'
@property
def refname(self):
"""
Returns an appropriate reference name for this documentation
object. Usually this is its fully qualified path. Every
documentation object must provide this property.
e.g., The refname for this property is
<code>pdoc.Doc.refname</code>.
"""
assert False, 'subclass responsibility'
def __lt__(self, other):
return self.name < other.name
def is_empty(self):
"""
Returns true if the docstring for this object is empty.
"""
return len(self.docstring.strip()) == 0
class Module (Doc):
"""
Representation of a module's documentation.
"""
__pdoc__['Module.module'] = 'The Python module object.'
__pdoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None, allsubmodules=False):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pdoc.Module.classes`, `pdoc.Module.functions`,
`pdoc.Module.variables` and `pdoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pdoc.Class`
object.
If `allsubmodules` is `True`, then every submodule of this
module that can be found will be included in the
documentation, regardless of whether `__all__` contains it.
"""
name = getattr(module, '__pdoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = allsubmodules
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pdoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __pdoc__ override.
for name, docstring in getattr(self.module, '__pdoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def text(self):
"""
Returns the documentation for this module as plain text.
"""
t = _get_tpl('/text.mako')
text, _ = re.subn('\n\n\n+', '\n\n', t.render(module=self).strip())
return text
def html(self, external_links=False, link_prefix='',
source=True, **kwargs):
"""
Returns the documentation for this module as
self-contained HTML.
If `external_links` is `True`, then identifiers to external
modules are always turned into links.
If `link_prefix` is `True`, then all links will have that
prefix. Otherwise, links are always relative.
If `source` is `True`, then source code will be retrieved for
every Python object whenever possible. This can dramatically
decrease performance when documenting large modules.
`kwargs` is passed to the `mako` render function.
"""
t = _get_tpl('/html.mako')
t = t.render(module=self,
external_links=external_links,
link_prefix=link_prefix,
show_source_code=source,
**kwargs)
return t.strip()
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pdoc.Class` or
`pdoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pdoc.Class` or
`pdoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pdoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pdoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pdoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pdoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pdoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pdoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pdoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
        which must be a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__pdoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pdoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pdoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pdoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pdoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return sorted(filter(p, self.doc.values()))
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pdoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return sorted(filter(p, self.doc_init.values()))
def methods(self):
"""
Returns all documented methods as `pdoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented static functions as `pdoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return sorted(filter(p, self.doc.values()))
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
        their inherited counterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
        # Materialize the filtered MRO so it can be traversed more than
        # once below (a bare filter object is exhausted after one pass).
        mro = list(filter(lambda c: c != self and isinstance(c, Class),
                          self.module.mro(self)))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_pdoc = getattr(self.module.module, '__pdoc__', {})
def forced_out(name):
return _pdoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
class Function (Doc):
"""
Representation of documentation for a Python function or method.
"""
def __init__(self, name, module, func_obj, cls=None, method=False):
"""
Same as `pdoc.Doc.__init__`, except `func_obj` must be a
Python function object. The docstring is gathered automatically.
`cls` should be set when this is a method or a static function
        belonging to a class. `cls` should be a `pdoc.Class` object.
`method` should be `True` when the function is a method. In
all other cases, it should be `False`.
"""
super(Function, self).__init__(name, module, inspect.getdoc(func_obj))
self.func = func_obj
"""The Python function object."""
self.cls = cls
"""
The `pdoc.Class` documentation object if this is a method. If
not, this is None.
"""
self.method = method
"""
Whether this function is a method or not.
In particular, static class methods have this set to False.
"""
@property
def source(self):
return _source(self.func)
@property
def refname(self):
if self.cls is None:
return '%s.%s' % (self.module.refname, self.name)
else:
return '%s.%s' % (self.cls.refname, self.name)
def spec(self):
"""
Returns a nicely formatted spec of the function's parameter
list as a string. This includes argument lists, keyword
arguments and default values.
"""
return ', '.join(self.params())
def params(self):
"""
Returns a list where each element is a nicely formatted
parameter of this function. This includes argument lists,
keyword arguments and default values.
"""
def fmt_param(el):
if isinstance(el, str) or isinstance(el, unicode):
return el
else:
return '(%s)' % (', '.join(map(fmt_param, el)))
try:
getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
s = getspec(self.func)
except TypeError:
# I guess this is for C builtin functions?
return ['...']
params = []
for i, param in enumerate(s.args):
if s.defaults is not None and len(s.args) - i <= len(s.defaults):
defind = len(s.defaults) - (len(s.args) - i)
params.append('%s=%s' % (param, repr(s.defaults[defind])))
else:
params.append(fmt_param(param))
if s.varargs is not None:
params.append('*%s' % s.varargs)
# TODO: This needs to be adjusted in Python 3. There's more stuff
# returned from getfullargspec than what we're looking at here.
keywords = getattr(s, 'varkw', getattr(s, 'keywords', None))
if keywords is not None:
params.append('**%s' % keywords)
return params
def __lt__(self, other):
# Push __init__ to the top.
if '__init__' in (self.name, other.name):
return self.name != other.name and self.name == '__init__'
else:
return self.name < other.name
class Variable (Doc):
"""
Representation of a variable's documentation. This includes
module, class and instance variables.
"""
def __init__(self, name, module, docstring, cls=None):
"""
Same as `pdoc.Doc.__init__`, except `cls` should be provided
as a `pdoc.Class` object when this is a class or instance
variable.
"""
super(Variable, self).__init__(name, module, docstring)
self.cls = cls
"""
        The `pdoc.Class` object if this is a class or instance
variable. If not, this is None.
"""
@property
def source(self):
return []
@property
def refname(self):
if self.cls is None:
return '%s.%s' % (self.module.refname, self.name)
else:
return '%s.%s' % (self.cls.refname, self.name)
class External (Doc):
"""
A representation of an external identifier. The textual
representation is the same as an internal identifier, but without
any context. (Usually this makes linking more difficult.)
External identifiers are also used to represent something that is
not exported but appears somewhere in the public interface (like
the ancestor list of a class).
"""
__pdoc__['External.docstring'] = \
"""
An empty string. External identifiers do not have
docstrings.
"""
__pdoc__['External.module'] = \
"""
Always `None`. External identifiers have no associated
`pdoc.Module`.
"""
__pdoc__['External.name'] = \
"""
Always equivalent to `pdoc.External.refname` since external
identifiers are always expressed in their fully qualified
form.
"""
def __init__(self, name):
"""
Initializes an external identifier with `name`, where `name`
should be a fully qualified name.
"""
super(External, self).__init__(name, None, '')
@property
def source(self):
return []
@property
def refname(self):
return self.name
| gpl-2.0 | 4,057,034,538,068,730,400 | 35.340796 | 78 | 0.609784 | false |
tylergibson/electron | script/bootstrap.py | 11 | 7131 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'
if os.environ.has_key('CI'):
NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
NPM = 'npm'
if sys.platform in ['win32', 'cygwin']:
NPM += '.cmd'
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
if not args.yes and PLATFORM != 'win32':
check_root()
if args.verbose:
enable_verbose_mode()
if sys.platform == 'cygwin':
update_win32_python()
if PLATFORM != 'win32':
update_clang()
update_submodules()
setup_python_libs()
update_node_modules('.')
bootstrap_brightray(args.dev, args.url, args.target_arch,
args.libcc_source_path, args.libcc_shared_library_path,
args.libcc_static_library_path)
if args.target_arch in ['arm', 'ia32'] and PLATFORM == 'linux':
download_sysroot(args.target_arch)
create_chrome_version_h()
touch_config_gypi()
run_update()
update_electron_modules('spec', args.target_arch)
def parse_args():
parser = argparse.ArgumentParser(description='Bootstrap this project')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
parser.add_argument('-d', '--dev', action='store_true',
help='Do not download static_library build')
parser.add_argument('-y', '--yes', '--assume-yes',
action='store_true',
help='Run non-interactively by assuming "yes" to all ' \
'prompts.')
parser.add_argument('--target_arch', default=get_target_arch(),
help='Manually specify the arch to build for')
parser.add_argument('--libcc_source_path', required=False,
help='The source path of libchromiumcontent. ' \
'NOTE: All options of libchromiumcontent are ' \
'required OR let electron choose it')
parser.add_argument('--libcc_shared_library_path', required=False,
help='The shared library path of libchromiumcontent.')
parser.add_argument('--libcc_static_library_path', required=False,
help='The static library path of libchromiumcontent.')
return parser.parse_args()
def check_root():
if os.geteuid() == 0:
print "We suggest not running this as root, unless you're really sure."
choice = raw_input("Do you want to continue? [y/N]: ")
if choice not in ('y', 'Y'):
sys.exit(0)
def update_submodules():
execute_stdout(['git', 'submodule', 'sync'])
execute_stdout(['git', 'submodule', 'update', '--init', '--recursive'])
def setup_python_libs():
for lib in ('requests', 'boto'):
with scoped_cwd(os.path.join(VENDOR_DIR, lib)):
execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
libcc_shared_library_path,
libcc_static_library_path):
bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
args = [
'--commit', LIBCHROMIUMCONTENT_COMMIT,
'--target_arch', target_arch,
url
]
if is_dev:
args = ['--dev'] + args
if (libcc_source_path != None and
libcc_shared_library_path != None and
libcc_static_library_path != None):
args += ['--libcc_source_path', libcc_source_path,
'--libcc_shared_library_path', libcc_shared_library_path,
'--libcc_static_library_path', libcc_static_library_path]
execute_stdout([sys.executable, bootstrap] + args)
def update_node_modules(dirname, env=None):
if env is None:
env = os.environ
if PLATFORM == 'linux':
# Use prebuilt clang for building native modules.
llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
'Release+Asserts', 'bin')
env['CC'] = os.path.join(llvm_dir, 'clang')
env['CXX'] = os.path.join(llvm_dir, 'clang++')
env['npm_config_clang'] = '1'
with scoped_cwd(dirname):
args = [NPM, 'install']
if is_verbose_mode():
args += ['--verbose']
# Ignore npm install errors when running in CI.
if os.environ.has_key('CI'):
try:
execute_stdout(args, env)
except subprocess.CalledProcessError:
pass
else:
execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
env = os.environ.copy()
env['npm_config_arch'] = target_arch
env['npm_config_target'] = get_atom_shell_version()
env['npm_config_disturl'] = 'https://atom.io/download/atom-shell'
update_node_modules(dirname, env)
def update_win32_python():
with scoped_cwd(VENDOR_DIR):
if not os.path.exists('python_26'):
execute_stdout(['git', 'clone', PYTHON_26_URL])
def update_clang():
execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')])
def download_sysroot(target_arch):
if target_arch == 'ia32':
target_arch = 'i386'
execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
'--arch', target_arch])
def create_chrome_version_h():
version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'libchromiumcontent', 'VERSION')
target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
with open(version_file, 'r') as f:
version = f.read()
with open(template_file, 'r') as f:
template = f.read()
content = template.replace('{PLACEHOLDER}', version.strip())
# We update the file only if the content has changed (ignoring line ending
# differences).
should_write = True
if os.path.isfile(target_file):
with open(target_file, 'r') as f:
      should_write = f.read().replace('\r', '') != content.replace('\r', '')
if should_write:
with open(target_file, 'w') as f:
f.write(content)
def touch_config_gypi():
config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
with open(config_gypi, 'w+') as f:
content = "\n{'variables':{}}"
if f.read() != content:
f.write(content)
def run_update():
update = os.path.join(SOURCE_ROOT, 'script', 'update.py')
execute_stdout([sys.executable, update])
if __name__ == '__main__':
sys.exit(main())
| mit | -126,740,728,718,322,400 | 32.957143 | 79 | 0.61436 | false |
KenanBek/rslservices | app/account/views.py | 1 | 3706 | from django.contrib.auth.models import User
from django.forms.util import ErrorList
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect
from core.decorators import anonymous_required
import forms
def index(request, template="bootstrap3/account/index.html", context={}):
return render(request, template, context)
@anonymous_required
def login(request, template="bootstrap3/account/login.html", context={}):
next = request.GET.get('next', False)
login_form = forms.LoginForm(request.POST or None)
if request.method == 'POST':
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth_login(request, user)
messages.add_message(request, messages.SUCCESS, _('You have successfully logged in.'))
if next:
return redirect(next)
else:
return redirect(reverse('index'))
else:
messages.add_message(request, messages.WARNING, _('Non active user.'))
else:
messages.add_message(request, messages.ERROR, _('Wrong username or password.'))
context['login_form'] = login_form
return render(request, template, context)
@anonymous_required
def register(request, template="bootstrap3/account/register.html", context={}):
user_form = forms.UserForm(request.POST or None)
profile_form = forms.ProfileForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
if user_form.is_valid() and profile_form.is_valid():
user_username = user_form.cleaned_data['username']
user_email = user_form.cleaned_data['email']
user_password = user_form.cleaned_data['password']
user = user_form.save(commit=False)
if user.email and User.objects.filter(email=user_email).exclude(username=user_username).count():
errors = user_form._errors.setdefault("email", ErrorList())
errors.append(_('User with this Email already exists.'))
                messages.add_message(request, messages.ERROR, _('Please fix errors below.'))
else:
user.set_password(user_password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
messages.add_message(request, messages.SUCCESS, _('You have successfully registered.'))
user = authenticate(username=user_username, password=user_password)
auth_login(request, user)
return redirect(reverse('account_index'))
else:
            messages.add_message(request, messages.ERROR, _('Please fix errors below.'))
context['user_form'] = user_form
context['profile_form'] = profile_form
context['forms'] = (user_form, profile_form, )
return render(request, template, context)
@login_required
def logout(request):
auth_logout(request)
messages.add_message(request, messages.SUCCESS, _('You have successfully logged out.'))
return redirect(reverse('index'))
| gpl-3.0 | -1,262,388,006,191,288,300 | 39.725275 | 108 | 0.643281 | false |
shi2wei3/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_version.py | 8 | 1475 | from autotest.client.shared import error
from virttest import libvirt_vm
from virttest import virsh
from virttest import utils_libvirtd
def run(test, params, env):
"""
Test the command virsh version
(1) Call virsh version
(2) Call virsh version with an unexpected option
(3) Call virsh version with libvirtd service stop
"""
connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
"default"))
libvirtd = params.get("libvirtd", "on")
option = params.get("virsh_version_options")
status_error = (params.get("status_error") == "yes")
# Prepare libvirtd service
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Run test case
result = virsh.version(option, uri=connect_uri, debug=True)
# Recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
# Check status_error
if status_error:
if not result.exit_status:
raise error.TestFail("Command 'virsh version %s' succeeded "
"(incorrect command)" % option)
else:
if result.exit_status:
raise error.TestFail("Command 'virsh version %s' failed "
"(correct command)" % option)
if option.count("daemon") and not result.stdout.count("daemon"):
raise error.TestFail("No daemon information outputed!")
| gpl-2.0 | 6,781,127,001,119,999,000 | 33.302326 | 76 | 0.608136 | false |
bertugatt/textmt | textmt/corpus/MlrsReader.py | 1 | 1610 | from __future__ import unicode_literals
import nltk
from nltk.util import LazyMap
from nltk.corpus.reader.api import *
class MlrsReader(nltk.corpus.reader.conll.ConllCorpusReader):
WORDS = 'words' #: column type for words
POS = 'pos' #: column type for part-of-speech tags
LEMMA = 'lemma' #: column type for lemmatised words
ROOT = 'root' #: column type for root
COLUMN_TYPES = (WORDS, POS, LEMMA, ROOT)
def _read_grid_block(self, stream):
"""
Modified _read_grid_block method - goes through files line by line
        and separates blocks once '</s>' is found in the file,
        signifying the end of a sentence. '~' is used in the reading
        process to mark where a sentence ends.
"""
s = ''
while True:
line = stream.readline()
if not line:
if s:
break
                else:
                    # end of stream with nothing pending: signal EOF
                    return []
elif line.strip() == '</s>':
s += '~'
if s: break
else:
s += line
for block in [s]:
block = block.strip()
if not block: continue
pre_grid = []
grid = []
for line in block.split('\n'):
splitline = line.split()
try:
if splitline[0] == '~':
grid.append(pre_grid)
pre_grid = []
if len(splitline) == 4:
pre_grid.append(splitline)
                except IndexError:
                    # blank lines produce an empty splitline; skip them
                    pass
return grid | mit | 2,809,956,066,992,557,000 | 25.85 | 71 | 0.477019 | false |
huggingface/transformers | src/transformers/models/xlnet/tokenization_xlnet.py | 2 | 14406 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for XLNet model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...file_utils import SPIECE_UNDERLINE
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
"""
Construct an XLNet tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase the input when tokenizing.
remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to keep accents when tokenizing.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"<sep>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<cls>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (:obj:`dict`, `optional`):
Will be passed to the ``SentencePieceProcessor.__init__()`` method. The `Python wrapper for SentencePiece
<https://github.com/google/sentencepiece/tree/master/python>`__ can be used, among other things, to set:
- ``enable_sampling``: Enable subword regularization.
- ``nbest_size``: Sampling parameters for unigram. Invalid for BPE-Dropout.
- ``nbest_size = {0,1}``: No sampling is performed.
- ``nbest_size > 1``: samples from the nbest_size results.
- ``nbest_size < 0``: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- ``alpha``: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (:obj:`SentencePieceProcessor`):
The `SentencePiece` processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
padding_side = "left"
def __init__(
self,
vocab_file,
do_lower_case=False,
remove_space=True,
keep_accents=False,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
sep_token="<sep>",
pad_token="<pad>",
cls_token="<cls>",
mask_token="<mask>",
additional_special_tokens=["<eop>", "<eod>"],
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs
) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case,
remove_space=remove_space,
keep_accents=keep_accents,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
@property
def vocab_size(self):
return len(self.sp_model)
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def preprocess_text(self, inputs):
if self.remove_space:
outputs = " ".join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text: str) -> List[str]:
"""Tokenize a string."""
text = self.preprocess_text(text)
pieces = self.sp_model.encode(text, out_type=str)
new_pieces = []
for piece in pieces:
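            # Editor's note: a piece such as "9," (digits with a trailing comma)
            # is re-split below so the comma becomes its own piece and a leading
            # SentencePiece underline marker is not duplicated.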
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLNet sequence has the following format:
- single sequence: ``X <sep> <cls>``
- pair of sequences: ``A <sep> B <sep> <cls>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
return ([0] * len(token_ids_0)) + [1, 1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
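# A minimal usage sketch (editor's addition, not part of the original module); it
# assumes a local SentencePiece model file named "spiece.model" and is skipped
# quietly when that file is absent.
if __name__ == "__main__":
    if os.path.exists("spiece.model"):
        tokenizer = XLNetTokenizer(vocab_file="spiece.model")
        tokens = tokenizer._tokenize("Hello world")
        ids = tokenizer.build_inputs_with_special_tokens(
            tokenizer.convert_tokens_to_ids(tokens))
        print(tokens, ids)  # pieces, then ids ending with <sep> and <cls>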
| apache-2.0 | 1,638,247,104,463,545,900 | 41.122807 | 119 | 0.609052 | false |
ntts-clo/ryu | ryu/tests/unit/packet/test_ipv4.py | 18 | 4367 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
from struct import *
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib.packet import packet_utils
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.tcp import tcp
from ryu.lib import addrconv
LOG = logging.getLogger('test_ipv4')
class Test_ipv4(unittest.TestCase):
""" Test case for ipv4
"""
version = 4
header_length = 5 + 10
ver_hlen = version << 4 | header_length
tos = 0
total_length = header_length + 64
identification = 30774
flags = 4
offset = 1480
flg_off = flags << 13 | offset
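    # the IPv4 header packs the 3 flag bits and the 13-bit fragment offset into
    # a single 16-bit field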
ttl = 64
proto = inet.IPPROTO_TCP
csum = 0xadc6
src = '131.151.32.21'
dst = '131.151.32.129'
length = header_length * 4
option = '\x86\x28\x00\x00\x00\x01\x01\x22' \
+ '\x00\x01\xae\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01'
buf = pack(ipv4._PACK_STR, ver_hlen, tos, total_length, identification,
flg_off, ttl, proto, csum,
addrconv.ipv4.text_to_bin(src),
addrconv.ipv4.text_to_bin(dst)) \
+ option
ip = ipv4(version, header_length, tos, total_length, identification,
flags, offset, ttl, proto, csum, src, dst, option)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.version, self.ip.version)
eq_(self.header_length, self.ip.header_length)
eq_(self.tos, self.ip.tos)
eq_(self.total_length, self.ip.total_length)
eq_(self.identification, self.ip.identification)
eq_(self.flags, self.ip.flags)
eq_(self.offset, self.ip.offset)
eq_(self.ttl, self.ip.ttl)
eq_(self.proto, self.ip.proto)
eq_(self.csum, self.ip.csum)
eq_(self.src, self.ip.src)
eq_(self.dst, self.ip.dst)
eq_(self.length, len(self.ip))
eq_(self.option, self.ip.option)
def test_parser(self):
res, ptype, _ = self.ip.parser(self.buf)
eq_(res.version, self.version)
eq_(res.header_length, self.header_length)
eq_(res.tos, self.tos)
eq_(res.total_length, self.total_length)
eq_(res.identification, self.identification)
eq_(res.flags, self.flags)
eq_(res.offset, self.offset)
eq_(res.ttl, self.ttl)
eq_(res.proto, self.proto)
eq_(res.csum, self.csum)
eq_(res.src, self.src)
eq_(res.dst, self.dst)
eq_(ptype, tcp)
def test_serialize(self):
buf = self.ip.serialize(bytearray(), None)
res = struct.unpack_from(ipv4._PACK_STR, str(buf))
option = buf[ipv4._MIN_LEN:ipv4._MIN_LEN + len(self.option)]
eq_(res[0], self.ver_hlen)
eq_(res[1], self.tos)
eq_(res[2], self.total_length)
eq_(res[3], self.identification)
eq_(res[4], self.flg_off)
eq_(res[5], self.ttl)
eq_(res[6], self.proto)
eq_(res[8], addrconv.ipv4.text_to_bin(self.src))
eq_(res[9], addrconv.ipv4.text_to_bin(self.dst))
eq_(option, self.option)
# checksum
csum = packet_utils.checksum(buf)
eq_(csum, 0)
@raises(Exception)
def test_malformed_ipv4(self):
m_short_buf = self.buf[1:ipv4._MIN_LEN]
ipv4.parser(m_short_buf)
def test_json(self):
jsondict = self.ip.to_jsondict()
ip = ipv4.from_jsondict(jsondict['ipv4'])
eq_(str(self.ip), str(ip))
| apache-2.0 | 1,247,668,826,204,057,000 | 30.875912 | 75 | 0.618731 | false |
Nic30/hwtHls | hwtHls/examples/query/selectFF.py | 1 | 2244 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.assignment import Assignment
from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps
from hwt.interfaces.std import Signal, Clk
from hwt.synthesizer.unit import Unit
from hwtHls.examples.query.rtlNetlistManipulator import RtlNetlistManipulator
class FF_result():
def __init__(self, parent, clkSig, inputSig, regSig):
self.parent = parent
self.clkSig = clkSig
self.inputSig = inputSig
self.regSig = regSig
def __repr__(self):
return "<FF_result clk:%r, inputSig:%r, regSig:%r>" % (
self.clkSig, self.inputSig, self.regSig)
def replace(self, newOutput, newInput):
inp = self.inputSig
assig = inp.drivers[0]
m = RtlNetlistManipulator(self.parent)
if newInput is None:
m.disconnect_driver_of(inp, assig)
else:
m.reconnectDriverOf(inp, assig, newInput)
reg = self.regSig
if newOutput is None:
m.disconnect_endpoint_of(reg, assig)
else:
m.reconnect_endpoints_of(reg, newOutput)
class FF_select():
def __init__(self, ctx: Unit):
self.ctx = ctx
def on_rising_edge_found(self, sig):
for ep in sig.endpoints:
if isinstance(ep, Assignment):
if sig in ep.cond:
clk = sig.drivers[0].operands[0]
yield FF_result(self, clk, ep.src, ep.dst)
def select(self):
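        # Editor's note: a flip-flop is recognized as any signal whose single
        # driver is a rising-edge operator; the matching register assignments
        # are then collected by on_rising_edge_found().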
for sig in self.ctx.signals:
if len(sig.drivers) == 1:
driver = sig.drivers[0]
if isinstance(driver, Operator):
if driver.operator == AllOps.RISING_EDGE:
yield from self.on_rising_edge_found(sig)
class OneFF(Unit):
def _declr(self):
self.clk = Clk()
self.a = Signal()
self.b = Signal()._m()
def _impl(self):
r = self._reg
a_reg = r("a_reg")
a_reg(self.a)
self.b(a_reg)
s = FF_select(self._ctx)
for ff in s.select():
ff.replace(1, None)
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
print(to_rtl_str(OneFF()))
| mit | -6,456,931,574,568,119,000 | 27.769231 | 77 | 0.571301 | false |
Andrew-McNab-UK/DIRAC | DataManagementSystem/scripts/dirac-admin-user-quota.py | 9 | 1201 | #!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Show storage quotas for the specified users, or for all registered users if none are specified
Usage:
%s [user1 ...]
""" % Script.scriptName )
Script.parseCommandLine()
users = Script.getPositionalArgs()
from DIRAC import gLogger, gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
if not users:
res = gConfig.getSections( '/Registry/Users' )
if not res['OK']:
gLogger.error( "Failed to retrieve user list from CS", res['Message'] )
DIRAC.exit( 2 )
users = res['Value']
gLogger.notice( "-"*30 )
gLogger.notice( "%s|%s" % ( 'Username'.ljust( 15 ), 'Quota (GB)'.rjust( 15 ) ) )
gLogger.notice( "-"*30 )
for user in sorted( users ):
quota = gConfig.getValue( '/Registry/Users/%s/Quota' % user, 0 )
if not quota:
quota = gConfig.getValue( '/Registry/DefaultStorageQuota' )
gLogger.notice( "%s|%s" % ( user.ljust( 15 ), str( quota ).rjust( 15 ) ) )
gLogger.notice( "-"*30 )
DIRAC.exit( 0 )
| gpl-3.0 | -7,788,512,496,321,830,000 | 30.605263 | 90 | 0.591174 | false |
dbentley/pants | contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_task.py | 1 | 4139 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import string
from textwrap import dedent
from pants.build_graph.target import Target
from pants.util.contextutil import temporary_dir
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.targets.node_test import NodeTest
from pants.contrib.node.tasks.node_task import NodeTask
class NodeTaskTest(TaskTestBase):
class TestNodeTask(NodeTask):
def execute(self):
      # We never execute the task; we just want to exercise the helpers it provides to subclasses.
raise NotImplementedError()
@classmethod
def task_type(cls):
return cls.TestNodeTask
def test_is_node_package(self):
expected = {
NodeRemoteModule: True,
NodeModule: True,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_package))
def test_is_node_module(self):
expected = {
NodeRemoteModule: False,
NodeModule: True,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_module))
def test_is_node_remote_module(self):
expected = {
NodeRemoteModule: True,
NodeModule: False,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_remote_module))
def test_is_node_test(self):
expected = {
NodeRemoteModule: False,
NodeModule: False,
NodeTest: True,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_test))
def _type_check(self, types, type_check_function):
# Make sure the diff display length is long enough for the test_is_* tests.
# It's a little weird to include this side effect here, but otherwise it would have to
# be duplicated or go in the setup (in which case it would affect all tests).
self.maxDiff = None
target_names = [':' + letter for letter in list(string.ascii_lowercase)]
types_with_target_names = zip(types, target_names)
type_check_results = [(type, type_check_function(self.make_target(target_name, type)))
for (type, target_name) in types_with_target_names]
return dict(type_check_results)
def test_execute_node(self):
task = self.create_task(self.context())
with temporary_dir() as chroot:
script = os.path.join(chroot, 'test.js')
proof = os.path.join(chroot, 'path')
with open(script, 'w') as fp:
fp.write(dedent("""
var fs = require('fs');
fs.writeFile("{proof}", "Hello World!", function(err) {{}});
""").format(proof=proof))
self.assertFalse(os.path.exists(proof))
returncode, command = task.execute_node(args=[script])
self.assertEqual(0, returncode)
self.assertTrue(os.path.exists(proof))
with open(proof) as fp:
self.assertEqual('Hello World!', fp.read().strip())
def test_execute_npm(self):
task = self.create_task(self.context())
with temporary_dir() as chroot:
proof = os.path.join(chroot, 'proof')
self.assertFalse(os.path.exists(proof))
package = {
'name': 'pantsbuild.pants.test',
'version': '0.0.0',
'scripts': {
'proof': 'echo "42" > {}'.format(proof)
}
}
with open(os.path.join(chroot, 'package.json'), 'wb') as fp:
json.dump(package, fp)
returncode, command = task.execute_npm(args=['run-script', 'proof'], cwd=chroot)
self.assertEqual(0, returncode)
self.assertTrue(os.path.exists(proof))
with open(proof) as fp:
self.assertEqual('42', fp.read().strip())
| apache-2.0 | 7,513,799,572,198,754,000 | 33.206612 | 97 | 0.662962 | false |
xiangyq/Youqing-final-project | calculate.py | 1 | 2813 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Calculate stock return and yield module"""
import datetime
import csv
def calculate(stocks, stock_market_price):
"""Do some math and return stocks information
including return and yield.
Args:
stocks(list): a list with lists nested.
stock_market_price(dict): a dictionary with stock symbol as key and
stock price as value.
Returns:
        None. Writes a myresult.csv file with all the information and
        prints the message: 'Check the result in myresult.csv!'
Examples:
stocks = [
['stock symbol', 'purchase price', 'purchase date', 'volume'],
['C', '56', '20120324', '25'],
['C', '53.12', '20140325', '100'],
['JPM', '45.3', '20130427', '100'],
['APPL', '103.23', '20150125', '20']
]
stock_market_price = {'C': 67.0, 'APPL': 103.0, 'JPM': 87.0}
>>> calculate(stocks, stock_market_price)
Check the result in myresult.csv!
"""
result = [['stock symbol', 'purchase price', 'purchase date',
'volume', 'total return', 'annual yield']]
counter = 1
while counter < len(stocks):
sym = stocks[counter][0]
pdate = stocks[counter][2]
vol = stocks[counter][3]
price1 = stocks[counter][1]
price2 = stock_market_price[sym]
myreturn = (float(price2) - float(price1)) * int(stocks[counter][3])
myreturn = round(myreturn, 1)
c_date = datetime.date.today()
p_date = datetime.datetime.strptime(pdate, '%Y%m%d').date()
h_days = (c_date - p_date).days
myinvest = round(float(stocks[counter][1]) * int(stocks[counter][3]), 1)
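        # annualized yield (%) = (total return / invested amount), scaled from
        # the holding period in days to a 365-day year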
myyield = round((myreturn / myinvest) * 365 / h_days * 100, 2)
newinfor = [sym, price1, pdate, vol, '${}'.format(myreturn),
'{}%'.format(myyield)]
result.append(newinfor)
counter += 1
output_file = open('myresult.csv', 'w')
output = csv.writer(output_file)
output.writerows(result)
output_file.close()
print 'Check the result in myresult.csv!'
INPUT_FILE = open('mystock.csv', 'r')
MYSTOCKS = csv.reader(INPUT_FILE)
STOCK_SYMBOL_LIST = []
STOCKS = []
for row in MYSTOCKS:
STOCKS.append(row)
if row[0] != 'stock symbol'and row[0] not in STOCK_SYMBOL_LIST:
STOCK_SYMBOL_LIST.append(row[0])
INPUT_FILE.close()
STOCK_MARKET_PRICE = {}
for STOCK in STOCK_SYMBOL_LIST:
STOCK_PRICE = float(raw_input('What is the price of {} now?'.format(STOCK)))
STOCK_INFOR = {STOCK: STOCK_PRICE}
STOCK_MARKET_PRICE.update(STOCK_INFOR)
print STOCK_MARKET_PRICE
ANSWER = raw_input('Check the prices: are they correct, yes or no?')
if ANSWER[:1].upper() == 'Y':
calculate(STOCKS, STOCK_MARKET_PRICE)
else:
print "Try again!"
| mpl-2.0 | -4,933,643,838,198,132,000 | 32.891566 | 80 | 0.601138 | false |
ppwwyyxx/tensorflow | tensorflow/python/ops/gradient_checker_v2_test.py | 3 | 11259 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compute_gradient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradient_checker_v2 as gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
# needs this to register gradient for SoftmaxCrossEntropyWithLogits:
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _random_complex(shape, dtype):
data = np.random.random_sample(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data.imag = np.random.random_sample(shape)
return data
@test_util.run_all_in_graph_and_eager_modes
class GradientCheckerTest(test.TestCase):
def testAddSimple(self):
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x1: math_ops.add(x1, x2), [x1]))
tf_logging.info("x1 error = %f", error)
self.assertLess(error, 1e-4)
def testAddCustomized(self):
size = (2, 3)
x1 = constant_op.constant(
2.0, shape=size, dtype=dtypes.float64, name="x1")
x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
    # check gradients for x2 using a special delta
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x2: math_ops.add(x1, x2),
[x2], delta=1e-2))
tf_logging.info("x2 error = %f", error)
self.assertLess(error, 1e-10)
def testGather(self):
def f(params):
index_values = [1, 3]
indices = constant_op.constant(index_values, name="i")
return array_ops.gather(params, indices, name="y")
p_shape = (4, 2)
p_size = 8
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [params]))
tf_logging.info("gather error = %f", error)
self.assertLess(error, 1e-4)
def testNestedGather(self):
def f(params):
index_values = [1, 3, 5, 6]
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
index_values2 = [0, 2]
indices2 = constant_op.constant(index_values2, name="i2")
return array_ops.gather(y, indices2, name="y2")
p_shape = (8, 2)
p_size = 16
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [params]))
tf_logging.info("nested gather error = %f", error)
self.assertLess(error, 1e-4)
def testComplexMul(self):
c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
def f(x):
return c * x
x_shape = c.shape
x_dtype = c.dtype
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(
f, [x])
correct = np.array([[5, -7], [7, 5]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=1e-4)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x])), 3e-4)
def testComplexConj(self):
def f(x):
return math_ops.conj(x)
x_shape = ()
x_dtype = dtypes.complex64
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(
f, [x])
correct = np.array([[1, 0], [0, -1]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=2e-5)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x])), 2e-5)
def testEmptySucceeds(self):
def f(x):
return array_ops.identity(x)
x = constant_op.constant(np.random.random_sample((0, 3)),
dtype=dtypes.float32)
for grad in gradient_checker.compute_gradient(f, [x]):
self.assertEqual(grad[0].shape, (0, 0))
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x]))
self.assertEqual(error, 0)
def testEmptyMatMul(self):
def f(x, y):
return math_ops.matmul(x, y)
x = constant_op.constant(
np.random.random_sample((0, 3)), dtype=dtypes.float32)
y = constant_op.constant(
np.random.random_sample((3, 4)), dtype=dtypes.float32)
for grad in gradient_checker.compute_gradient(f, [x, y]):
self.assertEqual(grad[0].shape, (0, 0))
self.assertEqual(grad[1].shape, (0, 12))
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [x, y]))
self.assertEqual(error, 0)
def testEmptyFails(self):
@custom_gradient.custom_gradient
def id_bad_grad(x):
y = array_ops.identity(x)
def grad_fn(dy):
# dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32)
dx = array_ops.transpose(dy)
return dx
return y, grad_fn
def f(x):
return id_bad_grad(x)
x = constant_op.constant(np.random.random_sample((0, 3)),
dtype=dtypes.float32)
bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
with self.assertRaisesRegexp(ValueError, bad):
gradient_checker.compute_gradient(f, [x])
def testNaNGradFails(self):
@custom_gradient.custom_gradient
def id_nan_grad(x):
y = array_ops.identity(x)
def grad_fn(dy):
dx = np.nan * dy
# dx = dy
return dx
return y, grad_fn
def f(x):
return id_nan_grad(x)
x = constant_op.constant(np.random.random_sample((1, 1)),
dtype=dtypes.float32)
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
f, [x]))
    # A typical test would assert error < max_err, so here we assert that such a
    # check raises AssertionError, since NaN is not < 1.0.
with self.assertRaisesRegexp(AssertionError, "nan not less than 1.0"):
self.assertLess(error, 1.0)
def testGradGrad(self):
def f(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.square(x)
z = math_ops.square(y)
return tape.gradient(z, x)
analytical, numerical = gradient_checker.compute_gradient(f, [2.0])
self.assertAllEqual([[[48.]]], analytical)
self.assertAllClose([[[48.]]], numerical, rtol=1e-4)
@test_util.run_all_in_graph_and_eager_modes
class MiniMNISTTest(test.TestCase):
# Gradient checker for MNIST.
def _BuildAndTestMiniMNIST(self, param_index, tag):
# Fix seed to avoid occasional flakiness
np.random.seed(6)
# Hyperparameters
batch = 3
inputs = 16
features = 32
classes = 10
# Define the parameters
inp_data = np.random.random_sample(inputs * batch)
hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
hidden_bias_data = np.random.random_sample(features)
sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
sm_bias_data = np.random.random_sample(classes)
# special care for labels since they need to be normalized per batch
label_data = np.random.random(batch * classes).reshape((batch, classes))
s = label_data.sum(axis=1)
label_data /= s[:, None]
# We treat the inputs as "parameters" here
inp = constant_op.constant(
inp_data.tolist(),
shape=[batch, inputs],
dtype=dtypes.float64,
name="inp")
hidden_weight = constant_op.constant(
hidden_weight_data.tolist(),
shape=[inputs, features],
dtype=dtypes.float64,
name="hidden_weight")
hidden_bias = constant_op.constant(
hidden_bias_data.tolist(),
shape=[features],
dtype=dtypes.float64,
name="hidden_bias")
softmax_weight = constant_op.constant(
sm_weight_data.tolist(),
shape=[features, classes],
dtype=dtypes.float64,
name="softmax_weight")
softmax_bias = constant_op.constant(
sm_bias_data.tolist(),
shape=[classes],
dtype=dtypes.float64,
name="softmax_bias")
# List all the parameter so that we can test them one at a time
all_params = [
inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
]
# Now, Building MNIST
def f(inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias):
features = nn_ops.relu(
nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
logits = nn_ops.xw_plus_b(
features, softmax_weight, softmax_bias, name="logits")
labels = constant_op.constant(
label_data.tolist(),
shape=[batch, classes],
dtype=dtypes.float64,
name="labels")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="cost")
return cost
def f_restricted(x):
xs = all_params
i = param_index
# use x for the i-th parameter
xs = xs[0:i]+[x]+xs[i+1:]
return f(*xs)
# Test the gradients.
err = gradient_checker.max_error(*gradient_checker.compute_gradient(
f_restricted, [all_params[param_index]], delta=1e-5))
tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
return err
def testInputGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)
def testHiddenWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
def testHiddenBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
def testSoftmaxWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
def testSoftmaxBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,599,339,409,595,459,800 | 34.629747 | 80 | 0.649969 | false |
ravello/testmill | lib/testmill/test/test_images.py | 1 | 1035 | # Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
from testmill.main import main
from testmill.test import *
@systemtest
class TestImages(TestSuite):
"""Run some basic test on the standard images."""
def test_images(self):
args = get_common_args()
args += ['run', '-m', 'platformtest.yml',
'platformtest', 'sh check_image.sh']
retval = main(args)
assert retval == 0
| apache-2.0 | 639,001,694,747,530,900 | 32.387097 | 74 | 0.703382 | false |
ben-jones/facade | tests/verifyFrame.py | 2 | 6109 | # Ben Jones
# Fall 2013
# HTPT Pluggable Transport
import unittest
from htpt import constants
from htpt import frame
class TestSeqNum(unittest.TestCase):
"""Test sequence number module and lock"""
def test_init(self):
aNum = 10
self.SN = frame.SeqNumber(aNum)
self.SN.seqNum += 500
#self.assertEqual(aNum, self.SN.seqNum)
self.assertTrue(self.SN.initialized)
def test_getSequenceAndIncrement(self):
"""Ensure that sequence numbers are handed out properly and that
wrapping occurs correctly"""
#ensure that we get the next sequence number
self.SN = frame.SeqNumber(5)
self.assertEqual(6, self.SN.getSequenceAndIncrement())
#ensure that we do wrapping
self.SN = frame.SeqNumber(65534)
self.assertEqual(0, self.SN.getSequenceAndIncrement())
class TestSessionID(unittest.TestCase):
"""Test sessionID module and lock"""
def test_init(self):
aID = 10
self.SI = frame.SessionID(aID)
self.SI.sessionID += 500
#self.assertEqual(aID, self.SI.sessionID)
self.assertTrue(self.SI.initialized)
def test_getSessionIDAndIncrement(self):
"""Ensure that session ID are handed out properly and that
wrapping occurs correctly"""
#ensure that we get the next session ID on new client
self.SI = frame.SessionID(5)
self.assertEqual(6, self.SI.getSessionIDAndIncrement())
#ensure that we do wrapping
self.SI = frame.SessionID(255)
self.assertEqual(0, self.SI.getSessionIDAndIncrement())
class TestAssemble(unittest.TestCase):
"""Test the validity of the Assemble framing module in htpt"""
def setUp(self):
self.callback= self.flush
self.uploadedData = ''
self.Assembler = frame.Assemble()
def flush(self, data):
"""Dummy callback function to test the framing modules ability to
send data up"""
print "flush data up"
self.uploadedData = data
def test_init(self):
"""Ensure that the init method works correctly on the SeqNumber
object by initializing the sequence number to the correct value"""
# check initialization with default session ID
self.assertEqual(self.Assembler.seqNum.seqNum, 0)
self.assertEqual(self.Assembler.sessionID, 0)
def test_sessionID(self):
"""Test arbit session ID and set/get"""
aID = 10
self.Assembler.setSessionID(aID)
self.assertEqual(aID, self.Assembler.getSessionID())
def test_generateFlags(self):
# default case no flags
self.flags = self.Assembler.generateFlags()
self.assertEqual(self.flags, 0)
# only more_data flag
self.flags = self.Assembler.generateFlags(more_data=1)
self.assertEqual(self.flags, 1<<7)
# only SYN flag
self.flags = self.Assembler.generateFlags(SYN=1)
self.assertEqual(self.flags, 1<<6)
# more_data and SYN flag
self.flags = self.Assembler.generateFlags(more_data=1, SYN=1)
self.assertEqual(self.flags, 3<<6)
    # an unrecognized flag should still behave like the default case
    self.flags = self.Assembler.generateFlags(unknown_flag=1)
    self.assertEqual(self.flags, 0, "flags break with arbitrary data")
def test_getHeaders(self):
self.Assembler.setSessionID(0)
self.Assembler.seqNum.seqNum = 0
self.headerString = self.Assembler.getHeaders()
# sequence number should be 1
self.assertEqual(self.headerString[:2], '\x00\x01')
# session ID should be 0
self.assertEqual(self.headerString[2], '\x00')
# flags should be 0 => \x00
self.assertEqual(self.headerString[3], '\x00')
# this should increase sequence number by one, and flags will change
self.headerString = self.Assembler.getHeaders(more_data=1)
self.assertEqual(self.headerString[:2], '\x00\x02')
self.assertEqual(self.headerString[3], '\x80')
def test_assemble(self):
# less data
self.data = '0100101010101010101010101'
self.output = self.Assembler.assemble(self.data, more_data=1, SYN=1)
self.assertEqual(self.data, self.output[4:])
class TestDisassemble(unittest.TestCase):
"""Test the validity of the Disassemble framing module in htpt"""
def setUp(self):
self.callback = self.dummyCallback
self.downloadedData = ''
self.Disassembler = frame.Disassemble(self.callback)
self.dummyData = '01010101010101010101010101010100101'
# data, more_data = 1, SYN=1, seq num =16
self.test_frame1 = '\x00\x10\x00\xc001010101010101010101010101010100101'
# data[:5], more_data = 0, SYN=1 , seq num = 19
self.test_frame2 = '\x00\x13\x00@01010'
# no data, more_data = 1, SYN=0, seq num = 21
self.test_frame3 = '\x00\x15\x00\x80'
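    # Header layout implied by the frames above (editor's note): bytes 0-1 hold
    # the big-endian sequence number, byte 2 the session ID, and byte 3 the
    # flags, with bit 7 = more_data and bit 6 = SYN.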
def dummyCallback(self, data):
"""Dummy callback function to test the framing modules ability to
send data up"""
print "flush data up"
self.downloadedData = data
def test_init(self):
"""Ensure that the init method works correctly by initializing
the receive buffer"""
# check initialization with default session ID
self.assertEqual(len(self.Disassembler.buffer.buffer), constants.BUFFER_SIZE)
def test_retrieveHeaders(self):
#more_data=1, SYN=0, sessionID=10 (set using setSessionID()), seqNum = 23
self.header = '\x00\x17\n\x80'
self.Disassembler.retrieveHeaders(self.header)
self.assertEqual(self.Disassembler.seqNum, 23)
self.assertEqual(self.Disassembler.getSessionID(), 10)
self.assertEqual(self.Disassembler.flags, 1<<7)
def test_disassemble(self):
self.output = self.Disassembler.disassemble(self.test_frame1)
self.assertEqual(self.Disassembler.seqNum, 16)
self.assertEqual(self.Disassembler.flags, (1<<7 | 1<<6))
self.assertEqual(self.output, self.dummyData)
self.output = self.Disassembler.disassemble(self.test_frame2)
self.assertEqual(self.Disassembler.seqNum, 19)
self.assertEqual(self.Disassembler.flags, (1<<6))
self.assertEqual(self.output, self.dummyData[:5])
self.output = self.Disassembler.disassemble(self.test_frame3)
self.assertEqual(self.Disassembler.seqNum, 21)
self.assertEqual(self.Disassembler.flags, 1<<7)
self.assertEqual(self.output, '')
if __name__ == "__main__":
unittest.main()
| mit | -80,114,441,224,989,220 | 33.320225 | 81 | 0.70879 | false |
KimTaehee/eucalyptus | clc/eucadmin/eucadmin/describeloadbalancing.py | 6 | 1534 | # Copyright 2011-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import eucadmin.describerequest
class DescribeLoadBalancing(eucadmin.describerequest.DescribeRequest):
ServiceName = 'LoadBalancing'
Description = 'List LoadBalancing services.'
| gpl-3.0 | -8,414,680,557,378,003,000 | 50.133333 | 74 | 0.790743 | false |
nightjean/Deep-Learning | tensorflow/contrib/learn/python/learn/utils/gc.py | 10 | 6119 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print both(all_paths) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
def one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
# interval 0.
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(compat.as_str_any(base_dir),
compat.as_str_any(r)),
None))
if p:
paths.append(p)
return sorted(paths)
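# A small self-contained sketch (editor's addition) of the one-per-interval
# filter, which the module docstring above does not demonstrate:
if __name__ == "__main__":
  demo_paths = [Path("/tmp/%d" % v, v) for v in range(1, 8)]
  keep = one_of_every_n_export_versions(3)
  # the largest version is kept in each interval [0, 3], (3, 6], (6, 9]
  print([p.path for p in keep(demo_paths)])  # ['/tmp/3', '/tmp/6', '/tmp/7']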
| apache-2.0 | -5,166,066,407,041,046,000 | 28.418269 | 80 | 0.651904 | false |
kenshay/ImageScripter | Script_Runner/PYTHON/Lib/sqlite3/dbapi2.py | 126 | 2687 | # pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
import collections.abc
from _sqlite3 import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = memoryview
collections.abc.Sequence.register(Row)
def register_adapters_and_converters():
def adapt_date(val):
return val.isoformat()
def adapt_datetime(val):
return val.isoformat(" ")
def convert_date(val):
return datetime.date(*map(int, val.split(b"-")))
def convert_timestamp(val):
datepart, timepart = val.split(b" ")
year, month, day = map(int, datepart.split(b"-"))
timepart_full = timepart.split(b".")
hours, minutes, seconds = map(int, timepart_full[0].split(b":"))
if len(timepart_full) == 2:
microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
else:
microseconds = 0
val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
return val
register_adapter(datetime.date, adapt_date)
register_adapter(datetime.datetime, adapt_datetime)
register_converter("date", convert_date)
register_converter("timestamp", convert_timestamp)
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
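# A short round-trip sketch (editor's addition): with detect_types=PARSE_DECLTYPES,
# the converters registered above rebuild date/timestamp objects on SELECT.
if __name__ == "__main__":
    con = connect(":memory:", detect_types=PARSE_DECLTYPES)
    con.execute("create table t (d date, ts timestamp)")
    con.execute("insert into t values (?, ?)",
                (datetime.date(2020, 1, 2), datetime.datetime(2020, 1, 2, 3, 4, 5)))
    print(con.execute("select d, ts from t").fetchone())
    con.close()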
| gpl-3.0 | -3,338,774,772,565,211,600 | 29.179775 | 88 | 0.703649 | false |
orvi2014/kitsune | kitsune/wiki/tests/test_notifications.py | 17 | 11596 | # -*- coding: utf-8 -*-
from django.core import mail
from nose.tools import eq_
from kitsune.sumo.tests import post
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import add_permission, user
from kitsune.products.tests import product
from kitsune.wiki.config import (
SIGNIFICANCES, MAJOR_SIGNIFICANCE, MEDIUM_SIGNIFICANCE, TYPO_SIGNIFICANCE)
from kitsune.wiki.events import (
ReadyRevisionEvent, ApproveRevisionInLocaleEvent)
from kitsune.wiki.models import Revision
from kitsune.wiki.tests import document, revision, TestCaseBase
def _assert_ready_mail(mail):
assert 'ready for localization' in mail.subject
def _assert_approved_mail(mail):
assert 'new approved revision' in mail.subject
def _assert_creator_mail(mail):
assert mail.subject.startswith('Your revision has been approved')
def _set_up_ready_watcher():
"""Make a user who watches for revision readiness."""
ready_watcher = user(email='[email protected]', save=True)
ReadyRevisionEvent.notify(ready_watcher)
return ready_watcher
class ReviewTests(TestCaseBase):
"""Tests for notifications sent during revision review"""
def setUp(self):
"""Have a user watch for revision approval. Log in."""
self.approved_watcher = user(email='[email protected]', save=True)
ApproveRevisionInLocaleEvent.notify(self.approved_watcher,
locale='en-US')
approver = user(save=True)
add_permission(approver, Revision, 'review_revision')
add_permission(approver, Revision, 'mark_ready_for_l10n')
self.client.login(username=approver.username, password='testpass')
def _review_revision(self, is_approved=True, is_ready=False,
significance=SIGNIFICANCES[0][0], r=None,
comment=None, doc=None):
"""Make a revision, and approve or reject it through the view."""
if not r:
r = revision(is_approved=False,
is_ready_for_localization=False,
significance=significance,
document=doc,
save=True)
# Figure out POST data:
data = {'comment': 'đSome comment'}
if is_approved:
data['approve'] = 'Approve Revision'
data['significance'] = significance
if is_ready:
data['is_ready_for_localization'] = 'on'
if comment:
data['comment'] = comment
else:
data['reject'] = 'Reject Revision'
url = reverse('wiki.review_revision', locale=r.document.locale,
args=[r.document.slug, r.id])
response = self.client.post(url, data)
eq_(302, response.status_code)
def test_ready(self):
"""Show that a ready(-and-approved) rev mails Ready watchers a Ready
notification and Approved watchers an Approved one."""
_set_up_ready_watcher()
self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
# 1 mail to each watcher, 1 to the creator, and one to the reviewer
eq_(4, len(mail.outbox))
_assert_ready_mail(mail.outbox[0])
_assert_approved_mail(mail.outbox[1])
_assert_creator_mail(mail.outbox[2])
def test_product_specific_ready(self):
"""Verify product-specific ready for review notifications."""
# Add an all products in 'es' watcher and a Firefox OS in 'es'
# watcher.
ApproveRevisionInLocaleEvent.notify(user(save=True), locale='es')
ApproveRevisionInLocaleEvent.notify(
user(save=True), product='firefox-os', locale='es')
# Create an 'es' document for Firefox
parent = document(save=True)
doc = document(parent=parent, locale='es', save=True)
parent.products.add(product(slug='firefox', save=True))
# Review a revision. There should be 3 new emails:
# 1 to the creator, 1 to the reviewer and 1 to the 'es' watcher.
self._review_revision(
doc=doc, is_ready=True, significance=MEDIUM_SIGNIFICANCE)
eq_(3, len(mail.outbox))
_assert_approved_mail(mail.outbox[0])
_assert_creator_mail(mail.outbox[1])
# Add firefox-os to the document's products and review a new revision.
# There should be 4 new emails now (the same 3 from before plus one
# for the firefox-os watcher).
parent.products.add(product(slug='firefox-os', save=True))
self._review_revision(
doc=doc, is_ready=True, significance=MEDIUM_SIGNIFICANCE)
eq_(7, len(mail.outbox))
_assert_approved_mail(mail.outbox[3])
_assert_approved_mail(mail.outbox[4])
_assert_creator_mail(mail.outbox[5])
# Add a Firefox watcher. This time there should be 5 new emails.
ApproveRevisionInLocaleEvent.notify(
user(save=True), product='firefox', locale='es')
self._review_revision(
doc=doc, is_ready=True, significance=MEDIUM_SIGNIFICANCE)
eq_(12, len(mail.outbox))
def test_typo_significance_ignore(self):
# Create the first approved revision for the document. This one will
# always have MAJOR_SIGNIFICANCE.
r = revision(is_approved=True, save=True)
# Then, set up a watcher and create a TYPO_SIGNIFICANCE revision.
_set_up_ready_watcher()
self._review_revision(is_ready=True, doc=r.document,
significance=TYPO_SIGNIFICANCE)
        # This is the same as test_ready, except one mail is missing: the
        # localization mail.
eq_(3, len(mail.outbox))
def test_approved(self):
"""Show that an approved rev mails Ready watchers nothing and Approved
watchers an Approved notification."""
_set_up_ready_watcher()
self._review_revision(is_ready=False)
# 1 mail to Approved watcher, 1 to creator, 1 for reviewer
eq_(3, len(mail.outbox))
assert 'new approved revision' in mail.outbox[0].subject
assert 'Your revision has been approved' in mail.outbox[1].subject
def test_based_on_approved(self):
u1 = user()
u1.save()
r1 = revision(is_approved=False,
creator=u1,
is_ready_for_localization=False,
save=True)
u2 = user()
u2.save()
r2 = revision(document=r1.document, based_on=r1, is_approved=False,
creator=u2,
is_ready_for_localization=False,
save=True)
eq_(0, len(mail.outbox))
self._review_revision(r=r2)
# 1 mail for each watcher, 1 for creator, and one for reviewer.
eq_(4, len(mail.outbox))
assert 'has a new approved revision' in mail.outbox[0].subject
assert 'Your revision has been approved' in mail.outbox[1].subject
assert 'Your revision has been approved' in mail.outbox[2].subject
assert 'A revision you contributed to has' in mail.outbox[3].subject
def test_neither(self):
"""Show that neither an Approved nor a Ready mail is sent if a rev is
rejected."""
_set_up_ready_watcher()
self._review_revision(is_approved=False)
eq_(2, len(mail.outbox)) # 1 mail to creator, one to the reviewer.
assert mail.outbox[0].subject.startswith(
'Your revision has been reviewed')
def test_user_watching_both(self):
"""If a single person is watching ready and approved revisions and a
revision becomes ready, send only the readiness email, not the approval
one."""
# Have the Approved watcher watch Ready as well:
ReadyRevisionEvent.notify(self.approved_watcher)
self._review_revision(is_ready=True, significance=MEDIUM_SIGNIFICANCE)
# 1 mail to watcher, 1 to creator, 1 to reviewer
eq_(3, len(mail.outbox))
_assert_ready_mail(mail.outbox[0])
_assert_creator_mail(mail.outbox[1])
def test_new_lines_in_review_message(self):
"""Test that newlines in a review message are properly displayed."""
_set_up_ready_watcher()
self._review_revision(comment='foo\n\nbar\nbaz')
assert 'foo<br><br>bar<br>baz' in mail.outbox[1].alternatives[0][0]
def test_first_approved_revision_has_major_significance(self):
"""The 1st approved revision of a document has MAJOR_SIGNIFICANCE."""
self._review_revision(significance=MEDIUM_SIGNIFICANCE)
r = Revision.objects.get()
# Even though MEDIUM_SIGNIFICANCE was POSTed, the revision will be set
# to MAJOR_SIGNIFICANCE.
eq_(MAJOR_SIGNIFICANCE, r.significance)
class ReadyForL10nTests(TestCaseBase):
"""Tests for notifications sent during ready for l10n"""
def setUp(self):
"""Have a user watch for revision approval. Log in."""
self.ready_watcher = user(email='[email protected]', save=True)
ReadyRevisionEvent.notify(self.ready_watcher)
readyer = user(save=True)
add_permission(readyer, Revision, 'mark_ready_for_l10n')
self.client.login(username=readyer.username, password='testpass')
def _mark_as_ready_revision(self, doc=None):
"""Make a revision, and approve or reject it through the view."""
if doc is None:
doc = document(save=True)
r = revision(is_approved=True,
is_ready_for_localization=False,
significance=MEDIUM_SIGNIFICANCE,
document=doc,
save=True)
# Figure out POST data:
data = {'comment': 'something'}
response = post(self.client,
'wiki.mark_ready_for_l10n_revision',
data,
args=[r.document.slug, r.id])
eq_(200, response.status_code)
def test_ready(self):
"""Show that a ready(-and-approved) rev mails Ready watchers a Ready
notification and Approved watchers an Approved one."""
_set_up_ready_watcher()
self._mark_as_ready_revision()
eq_(2, len(mail.outbox)) # 1 mail to each watcher, none to marker
_assert_ready_mail(mail.outbox[0])
_assert_ready_mail(mail.outbox[1])
def test_product_specific_ready(self):
"""Verify product-specific ready for l10n notifications."""
# Add a Firefox OS watcher.
ReadyRevisionEvent.notify(user(save=True), product='firefox-os')
# Create a document for Firefox
doc = document(save=True)
doc.products.add(product(slug='firefox', save=True))
# Mark a revision a ready for L10n. There should be only one email
# to the watcher created in setUp.
self._mark_as_ready_revision(doc=doc)
eq_(1, len(mail.outbox))
_assert_ready_mail(mail.outbox[0])
# Add firefox-os to the document's products. Mark as ready for l10n,
# and there should be two new emails.
doc.products.add(product(slug='firefox-os', save=True))
self._mark_as_ready_revision(doc=doc)
eq_(3, len(mail.outbox))
_assert_ready_mail(mail.outbox[1])
_assert_ready_mail(mail.outbox[2])
# Add a Firefox watcher, mark as ready for l10n, and there should
# be three new emails.
ReadyRevisionEvent.notify(user(save=True), product='firefox')
self._mark_as_ready_revision(doc=doc)
eq_(6, len(mail.outbox))
| bsd-3-clause | -8,602,756,848,582,841,000 | 40.410714 | 79 | 0.625011 | false |
PRIMEDesigner15/PRIMEDesigner15 | dependencies/Lib/test/unittests/test_csv.py | 23 | 39507 | # Copyright (C) 2001,2002 Python Software Foundation
# csv package unit tests
import io
import sys
import os
import unittest
from io import StringIO
from tempfile import TemporaryFile
import csv
import gc
from test import support
class Test_Csv(unittest.TestCase):
"""
Test the underlying C csv parser in ways that are not appropriate
from the high level interface. Further tests of this nature are done
in TestDialectRegistry.
"""
def _test_arg_valid(self, ctor, arg):
self.assertRaises(TypeError, ctor)
self.assertRaises(TypeError, ctor, None)
self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
self.assertRaises(TypeError, ctor, arg, delimiter = 0)
self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
self.assertRaises(csv.Error, ctor, arg, 'foo')
self.assertRaises(TypeError, ctor, arg, delimiter=None)
self.assertRaises(TypeError, ctor, arg, delimiter=1)
self.assertRaises(TypeError, ctor, arg, quotechar=1)
self.assertRaises(TypeError, ctor, arg, lineterminator=None)
self.assertRaises(TypeError, ctor, arg, lineterminator=1)
self.assertRaises(TypeError, ctor, arg, quoting=None)
self.assertRaises(TypeError, ctor, arg,
quoting=csv.QUOTE_ALL, quotechar='')
self.assertRaises(TypeError, ctor, arg,
quoting=csv.QUOTE_ALL, quotechar=None)
def test_reader_arg_valid(self):
self._test_arg_valid(csv.reader, [])
def test_writer_arg_valid(self):
self._test_arg_valid(csv.writer, StringIO())
def _test_default_attrs(self, ctor, *args):
obj = ctor(*args)
# Check defaults
self.assertEqual(obj.dialect.delimiter, ',')
self.assertEqual(obj.dialect.doublequote, True)
self.assertEqual(obj.dialect.escapechar, None)
self.assertEqual(obj.dialect.lineterminator, "\r\n")
self.assertEqual(obj.dialect.quotechar, '"')
self.assertEqual(obj.dialect.quoting, csv.QUOTE_MINIMAL)
self.assertEqual(obj.dialect.skipinitialspace, False)
self.assertEqual(obj.dialect.strict, False)
# Try deleting or changing attributes (they are read-only)
self.assertRaises(AttributeError, delattr, obj.dialect, 'delimiter')
self.assertRaises(AttributeError, setattr, obj.dialect, 'delimiter', ':')
self.assertRaises(AttributeError, delattr, obj.dialect, 'quoting')
self.assertRaises(AttributeError, setattr, obj.dialect,
'quoting', None)
def test_reader_attrs(self):
self._test_default_attrs(csv.reader, [])
def test_writer_attrs(self):
self._test_default_attrs(csv.writer, StringIO())
def _test_kw_attrs(self, ctor, *args):
# Now try with alternate options
kwargs = dict(delimiter=':', doublequote=False, escapechar='\\',
lineterminator='\r', quotechar='*',
quoting=csv.QUOTE_NONE, skipinitialspace=True,
strict=True)
obj = ctor(*args, **kwargs)
self.assertEqual(obj.dialect.delimiter, ':')
self.assertEqual(obj.dialect.doublequote, False)
self.assertEqual(obj.dialect.escapechar, '\\')
self.assertEqual(obj.dialect.lineterminator, "\r")
self.assertEqual(obj.dialect.quotechar, '*')
self.assertEqual(obj.dialect.quoting, csv.QUOTE_NONE)
self.assertEqual(obj.dialect.skipinitialspace, True)
self.assertEqual(obj.dialect.strict, True)
def test_reader_kw_attrs(self):
self._test_kw_attrs(csv.reader, [])
def test_writer_kw_attrs(self):
self._test_kw_attrs(csv.writer, StringIO())
def _test_dialect_attrs(self, ctor, *args):
# Now try with dialect-derived options
class dialect:
delimiter='-'
doublequote=False
escapechar='^'
lineterminator='$'
quotechar='#'
quoting=csv.QUOTE_ALL
skipinitialspace=True
strict=False
args = args + (dialect,)
obj = ctor(*args)
self.assertEqual(obj.dialect.delimiter, '-')
self.assertEqual(obj.dialect.doublequote, False)
self.assertEqual(obj.dialect.escapechar, '^')
self.assertEqual(obj.dialect.lineterminator, "$")
self.assertEqual(obj.dialect.quotechar, '#')
self.assertEqual(obj.dialect.quoting, csv.QUOTE_ALL)
self.assertEqual(obj.dialect.skipinitialspace, True)
self.assertEqual(obj.dialect.strict, False)
def test_reader_dialect_attrs(self):
self._test_dialect_attrs(csv.reader, [])
def test_writer_dialect_attrs(self):
self._test_dialect_attrs(csv.writer, StringIO())
def _write_test(self, fields, expect, **kwargs):
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, **kwargs)
writer.writerow(fields)
fileobj.seek(0)
self.assertEqual(fileobj.read(),
expect + writer.dialect.lineterminator)
def test_write_arg_valid(self):
self.assertRaises(csv.Error, self._write_test, None, '')
self._write_test((), '')
self._write_test([None], '""')
self.assertRaises(csv.Error, self._write_test,
[None], None, quoting = csv.QUOTE_NONE)
# Check that exceptions are passed up the chain
class BadList:
def __len__(self):
                return 10
def __getitem__(self, i):
if i > 2:
raise IOError
self.assertRaises(IOError, self._write_test, BadList(), '')
class BadItem:
def __str__(self):
raise IOError
self.assertRaises(IOError, self._write_test, [BadItem()], '')
def test_write_bigfield(self):
# This exercises the buffer realloc functionality
bigstring = 'X' * 50000
self._write_test([bigstring,bigstring], '%s,%s' % \
(bigstring, bigstring))
def test_write_quoting(self):
self._write_test(['a',1,'p,q'], 'a,1,"p,q"')
self.assertRaises(csv.Error,
self._write_test,
['a',1,'p,q'], 'a,1,p,q',
quoting = csv.QUOTE_NONE)
self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
quoting = csv.QUOTE_MINIMAL)
self._write_test(['a',1,'p,q'], '"a",1,"p,q"',
quoting = csv.QUOTE_NONNUMERIC)
self._write_test(['a',1,'p,q'], '"a","1","p,q"',
quoting = csv.QUOTE_ALL)
self._write_test(['a\nb',1], '"a\nb","1"',
quoting = csv.QUOTE_ALL)
def test_write_escape(self):
self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
escapechar='\\')
self.assertRaises(csv.Error,
self._write_test,
['a',1,'p,"q"'], 'a,1,"p,\\"q\\""',
escapechar=None, doublequote=False)
self._write_test(['a',1,'p,"q"'], 'a,1,"p,\\"q\\""',
escapechar='\\', doublequote = False)
self._write_test(['"'], '""""',
escapechar='\\', quoting = csv.QUOTE_MINIMAL)
self._write_test(['"'], '\\"',
escapechar='\\', quoting = csv.QUOTE_MINIMAL,
doublequote = False)
self._write_test(['"'], '\\"',
escapechar='\\', quoting = csv.QUOTE_NONE)
self._write_test(['a',1,'p,q'], 'a,1,p\\,q',
escapechar='\\', quoting = csv.QUOTE_NONE)
def test_writerows(self):
class BrokenFile:
def write(self, buf):
raise IOError
writer = csv.writer(BrokenFile())
self.assertRaises(IOError, writer.writerows, [['a']])
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj)
self.assertRaises(TypeError, writer.writerows, None)
writer.writerows([['a','b'],['c','d']])
fileobj.seek(0)
self.assertEqual(fileobj.read(), "a,b\r\nc,d\r\n")
@support.cpython_only
def test_writerows_legacy_strings(self):
import _testcapi
c = _testcapi.unicode_legacy_string('a')
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj)
writer.writerows([[c]])
fileobj.seek(0)
self.assertEqual(fileobj.read(), "a\r\n")
def _read_test(self, input, expect, **kwargs):
reader = csv.reader(input, **kwargs)
result = list(reader)
self.assertEqual(result, expect)
def test_read_oddinputs(self):
self._read_test([], [])
self._read_test([''], [[]])
self.assertRaises(csv.Error, self._read_test,
['"ab"c'], None, strict = 1)
# cannot handle null bytes for the moment
self.assertRaises(csv.Error, self._read_test,
['ab\0c'], None, strict = 1)
self._read_test(['"ab"c'], [['abc']], doublequote = 0)
self.assertRaises(csv.Error, self._read_test,
[b'ab\0c'], None)
def test_read_eol(self):
self._read_test(['a,b'], [['a','b']])
self._read_test(['a,b\n'], [['a','b']])
self._read_test(['a,b\r\n'], [['a','b']])
self._read_test(['a,b\r'], [['a','b']])
self.assertRaises(csv.Error, self._read_test, ['a,b\rc,d'], [])
self.assertRaises(csv.Error, self._read_test, ['a,b\nc,d'], [])
self.assertRaises(csv.Error, self._read_test, ['a,b\r\nc,d'], [])
def test_read_eof(self):
self._read_test(['a,"'], [['a', '']])
self._read_test(['"a'], [['a']])
self._read_test(['^'], [['\n']], escapechar='^')
self.assertRaises(csv.Error, self._read_test, ['a,"'], [], strict=True)
self.assertRaises(csv.Error, self._read_test, ['"a'], [], strict=True)
self.assertRaises(csv.Error, self._read_test,
['^'], [], escapechar='^', strict=True)
def test_read_escape(self):
self._read_test(['a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
self._read_test(['a,b\\,c'], [['a', 'b,c']], escapechar='\\')
self._read_test(['a,"b\\,c"'], [['a', 'b,c']], escapechar='\\')
self._read_test(['a,"b,\\c"'], [['a', 'b,c']], escapechar='\\')
self._read_test(['a,"b,c\\""'], [['a', 'b,c"']], escapechar='\\')
self._read_test(['a,"b,c"\\'], [['a', 'b,c\\']], escapechar='\\')
def test_read_quoting(self):
self._read_test(['1,",3,",5'], [['1', ',3,', '5']])
self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
quotechar=None, escapechar='\\')
self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
quoting=csv.QUOTE_NONE, escapechar='\\')
# will this fail where locale uses comma for decimals?
self._read_test([',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
quoting=csv.QUOTE_NONNUMERIC)
self._read_test(['"a\nb", 7'], [['a\nb', ' 7']])
self.assertRaises(ValueError, self._read_test,
['abc,3'], [[]],
quoting=csv.QUOTE_NONNUMERIC)
def test_read_bigfield(self):
# This exercises the buffer realloc functionality and field size
# limits.
limit = csv.field_size_limit()
try:
size = 50000
bigstring = 'X' * size
bigline = '%s,%s' % (bigstring, bigstring)
self._read_test([bigline], [[bigstring, bigstring]])
csv.field_size_limit(size)
self._read_test([bigline], [[bigstring, bigstring]])
self.assertEqual(csv.field_size_limit(), size)
csv.field_size_limit(size-1)
self.assertRaises(csv.Error, self._read_test, [bigline], [])
self.assertRaises(TypeError, csv.field_size_limit, None)
self.assertRaises(TypeError, csv.field_size_limit, 1, None)
finally:
csv.field_size_limit(limit)
def test_read_linenum(self):
r = csv.reader(['line,1', 'line,2', 'line,3'])
self.assertEqual(r.line_num, 0)
next(r)
self.assertEqual(r.line_num, 1)
next(r)
self.assertEqual(r.line_num, 2)
next(r)
self.assertEqual(r.line_num, 3)
self.assertRaises(StopIteration, next, r)
self.assertEqual(r.line_num, 3)
    def test_roundtrip_quoted_newlines(self):
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj)
self.assertRaises(TypeError, writer.writerows, None)
rows = [['a\nb','b'],['c','x\r\nd']]
writer.writerows(rows)
fileobj.seek(0)
for i, row in enumerate(csv.reader(fileobj)):
self.assertEqual(row, rows[i])
class TestDialectRegistry(unittest.TestCase):
def test_registry_badargs(self):
self.assertRaises(TypeError, csv.list_dialects, None)
self.assertRaises(TypeError, csv.get_dialect)
self.assertRaises(csv.Error, csv.get_dialect, None)
self.assertRaises(csv.Error, csv.get_dialect, "nonesuch")
self.assertRaises(TypeError, csv.unregister_dialect)
self.assertRaises(csv.Error, csv.unregister_dialect, None)
self.assertRaises(csv.Error, csv.unregister_dialect, "nonesuch")
self.assertRaises(TypeError, csv.register_dialect, None)
self.assertRaises(TypeError, csv.register_dialect, None, None)
self.assertRaises(TypeError, csv.register_dialect, "nonesuch", 0, 0)
self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
badargument=None)
self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
quoting=None)
self.assertRaises(TypeError, csv.register_dialect, [])
def test_registry(self):
class myexceltsv(csv.excel):
delimiter = "\t"
name = "myexceltsv"
expected_dialects = csv.list_dialects() + [name]
expected_dialects.sort()
csv.register_dialect(name, myexceltsv)
self.addCleanup(csv.unregister_dialect, name)
self.assertEqual(csv.get_dialect(name).delimiter, '\t')
got_dialects = sorted(csv.list_dialects())
self.assertEqual(expected_dialects, got_dialects)
def test_register_kwargs(self):
name = 'fedcba'
csv.register_dialect(name, delimiter=';')
self.addCleanup(csv.unregister_dialect, name)
self.assertEqual(csv.get_dialect(name).delimiter, ';')
self.assertEqual([['X', 'Y', 'Z']], list(csv.reader(['X;Y;Z'], name)))
def test_incomplete_dialect(self):
class myexceltsv(csv.Dialect):
delimiter = "\t"
self.assertRaises(csv.Error, myexceltsv)
def test_space_dialect(self):
class space(csv.excel):
delimiter = " "
quoting = csv.QUOTE_NONE
escapechar = "\\"
with TemporaryFile("w+") as fileobj:
fileobj.write("abc def\nc1ccccc1 benzene\n")
fileobj.seek(0)
reader = csv.reader(fileobj, dialect=space())
self.assertEqual(next(reader), ["abc", "def"])
self.assertEqual(next(reader), ["c1ccccc1", "benzene"])
def compare_dialect_123(self, expected, *writeargs, **kwwriteargs):
with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
writer = csv.writer(fileobj, *writeargs, **kwwriteargs)
writer.writerow([1,2,3])
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
def test_dialect_apply(self):
class testA(csv.excel):
delimiter = "\t"
class testB(csv.excel):
delimiter = ":"
class testC(csv.excel):
delimiter = "|"
class testUni(csv.excel):
delimiter = "\u039B"
csv.register_dialect('testC', testC)
try:
self.compare_dialect_123("1,2,3\r\n")
self.compare_dialect_123("1\t2\t3\r\n", testA)
self.compare_dialect_123("1:2:3\r\n", dialect=testB())
self.compare_dialect_123("1|2|3\r\n", dialect='testC')
self.compare_dialect_123("1;2;3\r\n", dialect=testA,
delimiter=';')
self.compare_dialect_123("1\u039B2\u039B3\r\n",
dialect=testUni)
finally:
csv.unregister_dialect('testC')
def test_bad_dialect(self):
# Unknown parameter
self.assertRaises(TypeError, csv.reader, [], bad_attr = 0)
# Bad values
self.assertRaises(TypeError, csv.reader, [], delimiter = None)
self.assertRaises(TypeError, csv.reader, [], quoting = -1)
self.assertRaises(TypeError, csv.reader, [], quoting = 100)
class TestCsvBase(unittest.TestCase):
def readerAssertEqual(self, input, expected_result):
with TemporaryFile("w+", newline='') as fileobj:
fileobj.write(input)
fileobj.seek(0)
reader = csv.reader(fileobj, dialect = self.dialect)
fields = list(reader)
self.assertEqual(fields, expected_result)
def writerAssertEqual(self, input, expected_result):
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, dialect = self.dialect)
writer.writerows(input)
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected_result)
class TestDialectExcel(TestCsvBase):
dialect = 'excel'
def test_single(self):
self.readerAssertEqual('abc', [['abc']])
def test_simple(self):
self.readerAssertEqual('1,2,3,4,5', [['1','2','3','4','5']])
def test_blankline(self):
self.readerAssertEqual('', [])
def test_empty_fields(self):
self.readerAssertEqual(',', [['', '']])
def test_singlequoted(self):
self.readerAssertEqual('""', [['']])
def test_singlequoted_left_empty(self):
self.readerAssertEqual('"",', [['','']])
def test_singlequoted_right_empty(self):
self.readerAssertEqual(',""', [['','']])
def test_single_quoted_quote(self):
self.readerAssertEqual('""""', [['"']])
def test_quoted_quotes(self):
self.readerAssertEqual('""""""', [['""']])
def test_inline_quote(self):
self.readerAssertEqual('a""b', [['a""b']])
def test_inline_quotes(self):
self.readerAssertEqual('a"b"c', [['a"b"c']])
def test_quotes_and_more(self):
# Excel would never write a field containing '"a"b', but when
# reading one, it will return 'ab'.
self.readerAssertEqual('"a"b', [['ab']])
def test_lone_quote(self):
self.readerAssertEqual('a"b', [['a"b']])
def test_quote_and_quote(self):
# Excel would never write a field containing '"a" "b"', but when
# reading one, it will return 'a "b"'.
self.readerAssertEqual('"a" "b"', [['a "b"']])
def test_space_and_quote(self):
self.readerAssertEqual(' "a"', [[' "a"']])
def test_quoted(self):
self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
[['1', '2', '3',
'I think, therefore I am',
'5', '6']])
def test_quoted_quote(self):
self.readerAssertEqual('1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"',
[['1', '2', '3',
'"I see," said the blind man',
'as he picked up his hammer and saw']])
def test_quoted_nl(self):
input = '''\
1,2,3,"""I see,""
said the blind man","as he picked up his
hammer and saw"
9,8,7,6'''
self.readerAssertEqual(input,
[['1', '2', '3',
'"I see,"\nsaid the blind man',
'as he picked up his\nhammer and saw'],
['9','8','7','6']])
def test_dubious_quote(self):
self.readerAssertEqual('12,12,1",', [['12', '12', '1"', '']])
def test_null(self):
self.writerAssertEqual([], '')
def test_single_writer(self):
self.writerAssertEqual([['abc']], 'abc\r\n')
def test_simple_writer(self):
self.writerAssertEqual([[1, 2, 'abc', 3, 4]], '1,2,abc,3,4\r\n')
def test_quotes(self):
self.writerAssertEqual([[1, 2, 'a"bc"', 3, 4]], '1,2,"a""bc""",3,4\r\n')
def test_quote_fieldsep(self):
self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')
def test_newlines(self):
self.writerAssertEqual([[1, 2, 'a\nbc', 3, 4]], '1,2,"a\nbc",3,4\r\n')
class EscapedExcel(csv.excel):
quoting = csv.QUOTE_NONE
escapechar = '\\'
class TestEscapedExcel(TestCsvBase):
dialect = EscapedExcel()
def test_escape_fieldsep(self):
self.writerAssertEqual([['abc,def']], 'abc\\,def\r\n')
def test_read_escape_fieldsep(self):
self.readerAssertEqual('abc\\,def\r\n', [['abc,def']])
class TestDialectUnix(TestCsvBase):
dialect = 'unix'
def test_simple_writer(self):
self.writerAssertEqual([[1, 'abc def', 'abc']], '"1","abc def","abc"\n')
def test_simple_reader(self):
self.readerAssertEqual('"1","abc def","abc"\n', [['1', 'abc def', 'abc']])
class QuotedEscapedExcel(csv.excel):
quoting = csv.QUOTE_NONNUMERIC
escapechar = '\\'
class TestQuotedEscapedExcel(TestCsvBase):
dialect = QuotedEscapedExcel()
def test_write_escape_fieldsep(self):
self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')
def test_read_escape_fieldsep(self):
self.readerAssertEqual('"abc\\,def"\r\n', [['abc,def']])
class TestDictFields(unittest.TestCase):
### "long" means the row is longer than the number of fieldnames
### "short" means there are fewer elements in the row than fieldnames
def test_write_simple_dict(self):
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
writer.writeheader()
fileobj.seek(0)
self.assertEqual(fileobj.readline(), "f1,f2,f3\r\n")
writer.writerow({"f1": 10, "f3": "abc"})
fileobj.seek(0)
fileobj.readline() # header
self.assertEqual(fileobj.read(), "10,,abc\r\n")
def test_write_no_fields(self):
fileobj = StringIO()
self.assertRaises(TypeError, csv.DictWriter, fileobj)
def test_read_dict_fields(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("1,2,abc\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj,
fieldnames=["f1", "f2", "f3"])
self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
def test_read_dict_no_fieldnames(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj)
self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
# Two test cases to make sure existing ways of implicitly setting
# fieldnames continue to work. Both arise from discussion in issue3436.
def test_read_dict_fieldnames_from_file(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj,
fieldnames=next(csv.reader(fileobj)))
self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
def test_read_dict_fieldnames_chain(self):
import itertools
with TemporaryFile("w+") as fileobj:
fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj)
first = next(reader)
for row in itertools.chain([first], reader):
self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
self.assertEqual(row, {"f1": '1', "f2": '2', "f3": 'abc'})
def test_read_long(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("1,2,abc,4,5,6\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj,
fieldnames=["f1", "f2"])
self.assertEqual(next(reader), {"f1": '1', "f2": '2',
None: ["abc", "4", "5", "6"]})
def test_read_long_with_rest(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("1,2,abc,4,5,6\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj,
fieldnames=["f1", "f2"], restkey="_rest")
self.assertEqual(next(reader), {"f1": '1', "f2": '2',
"_rest": ["abc", "4", "5", "6"]})
def test_read_long_with_rest_no_fieldnames(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("f1,f2\r\n1,2,abc,4,5,6\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj, restkey="_rest")
self.assertEqual(reader.fieldnames, ["f1", "f2"])
self.assertEqual(next(reader), {"f1": '1', "f2": '2',
"_rest": ["abc", "4", "5", "6"]})
def test_read_short(self):
with TemporaryFile("w+") as fileobj:
fileobj.write("1,2,abc,4,5,6\r\n1,2,abc\r\n")
fileobj.seek(0)
reader = csv.DictReader(fileobj,
fieldnames="1 2 3 4 5 6".split(),
restval="DEFAULT")
self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
"4": '4', "5": '5', "6": '6'})
self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
"4": 'DEFAULT', "5": 'DEFAULT',
"6": 'DEFAULT'})
def test_read_multi(self):
sample = [
'2147483648,43.0e12,17,abc,def\r\n',
'147483648,43.0e2,17,abc,def\r\n',
'47483648,43.0,170,abc,def\r\n'
]
reader = csv.DictReader(sample,
fieldnames="i1 float i2 s1 s2".split())
self.assertEqual(next(reader), {"i1": '2147483648',
"float": '43.0e12',
"i2": '17',
"s1": 'abc',
"s2": 'def'})
def test_read_with_blanks(self):
reader = csv.DictReader(["1,2,abc,4,5,6\r\n","\r\n",
"1,2,abc,4,5,6\r\n"],
fieldnames="1 2 3 4 5 6".split())
self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
"4": '4', "5": '5', "6": '6'})
self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
"4": '4', "5": '5', "6": '6'})
def test_read_semi_sep(self):
reader = csv.DictReader(["1;2;abc;4;5;6\r\n"],
fieldnames="1 2 3 4 5 6".split(),
delimiter=';')
self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
"4": '4', "5": '5', "6": '6'})
class TestArrayWrites(unittest.TestCase):
def test_int_write(self):
import array
contents = [(20-i) for i in range(20)]
a = array.array('i', contents)
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, dialect="excel")
writer.writerow(a)
expected = ",".join([str(i) for i in a])+"\r\n"
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
def test_double_write(self):
import array
contents = [(20-i)*0.1 for i in range(20)]
a = array.array('d', contents)
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, dialect="excel")
writer.writerow(a)
expected = ",".join([str(i) for i in a])+"\r\n"
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
def test_float_write(self):
import array
contents = [(20-i)*0.1 for i in range(20)]
a = array.array('f', contents)
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, dialect="excel")
writer.writerow(a)
expected = ",".join([str(i) for i in a])+"\r\n"
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
def test_char_write(self):
import array, string
a = array.array('u', string.ascii_letters)
with TemporaryFile("w+", newline='') as fileobj:
writer = csv.writer(fileobj, dialect="excel")
writer.writerow(a)
expected = ",".join(a)+"\r\n"
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
class TestDialectValidity(unittest.TestCase):
def test_quoting(self):
class mydialect(csv.Dialect):
delimiter = ";"
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
mydialect.quoting = None
self.assertRaises(csv.Error, mydialect)
mydialect.doublequote = True
mydialect.quoting = csv.QUOTE_ALL
mydialect.quotechar = '"'
d = mydialect()
mydialect.quotechar = "''"
self.assertRaises(csv.Error, mydialect)
mydialect.quotechar = 4
self.assertRaises(csv.Error, mydialect)
def test_delimiter(self):
class mydialect(csv.Dialect):
delimiter = ";"
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
mydialect.delimiter = ":::"
self.assertRaises(csv.Error, mydialect)
mydialect.delimiter = 4
self.assertRaises(csv.Error, mydialect)
def test_lineterminator(self):
class mydialect(csv.Dialect):
delimiter = ";"
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
mydialect.lineterminator = ":::"
d = mydialect()
mydialect.lineterminator = 4
self.assertRaises(csv.Error, mydialect)
class TestSniffer(unittest.TestCase):
sample1 = """\
Harry's, Arlington Heights, IL, 2/1/03, Kimi Hayes
Shark City, Glendale Heights, IL, 12/28/02, Prezence
Tommy's Place, Blue Island, IL, 12/28/02, Blue Sunday/White Crow
Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
"""
sample2 = """\
'Harry''s':'Arlington Heights':'IL':'2/1/03':'Kimi Hayes'
'Shark City':'Glendale Heights':'IL':'12/28/02':'Prezence'
'Tommy''s Place':'Blue Island':'IL':'12/28/02':'Blue Sunday/White Crow'
'Stonecutters ''Seafood'' and Chop House':'Lemont':'IL':'12/19/02':'Week Back'
"""
header1 = '''\
"venue","city","state","date","performers"
'''
sample3 = '''\
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
'''
sample4 = '''\
2147483648;43.0e12;17;abc;def
147483648;43.0e2;17;abc;def
47483648;43.0;170;abc;def
'''
sample5 = "aaa\tbbb\r\nAAA\t\r\nBBB\t\r\n"
sample6 = "a|b|c\r\nd|e|f\r\n"
sample7 = "'a'|'b'|'c'\r\n'd'|e|f\r\n"
# Issue 18155: Use a delimiter that is a special char to regex:
header2 = '''\
"venue"+"city"+"state"+"date"+"performers"
'''
sample8 = """\
Harry's+ Arlington Heights+ IL+ 2/1/03+ Kimi Hayes
Shark City+ Glendale Heights+ IL+ 12/28/02+ Prezence
Tommy's Place+ Blue Island+ IL+ 12/28/02+ Blue Sunday/White Crow
Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back
"""
sample9 = """\
'Harry''s'+ Arlington Heights'+ 'IL'+ '2/1/03'+ 'Kimi Hayes'
'Shark City'+ Glendale Heights'+' IL'+ '12/28/02'+ 'Prezence'
'Tommy''s Place'+ Blue Island'+ 'IL'+ '12/28/02'+ 'Blue Sunday/White Crow'
'Stonecutters ''Seafood'' and Chop House'+ 'Lemont'+ 'IL'+ '12/19/02'+ 'Week Back'
"""
def test_has_header(self):
sniffer = csv.Sniffer()
self.assertEqual(sniffer.has_header(self.sample1), False)
self.assertEqual(sniffer.has_header(self.header1 + self.sample1),
True)
def test_has_header_regex_special_delimiter(self):
sniffer = csv.Sniffer()
self.assertEqual(sniffer.has_header(self.sample8), False)
self.assertEqual(sniffer.has_header(self.header2 + self.sample8),
True)
def test_sniff(self):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(self.sample1)
self.assertEqual(dialect.delimiter, ",")
self.assertEqual(dialect.quotechar, '"')
self.assertEqual(dialect.skipinitialspace, True)
dialect = sniffer.sniff(self.sample2)
self.assertEqual(dialect.delimiter, ":")
self.assertEqual(dialect.quotechar, "'")
self.assertEqual(dialect.skipinitialspace, False)
def test_delimiters(self):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(self.sample3)
# given that all three lines in sample3 are equal,
# I think that any character could have been 'guessed' as the
# delimiter, depending on dictionary order
self.assertIn(dialect.delimiter, self.sample3)
dialect = sniffer.sniff(self.sample3, delimiters="?,")
self.assertEqual(dialect.delimiter, "?")
dialect = sniffer.sniff(self.sample3, delimiters="/,")
self.assertEqual(dialect.delimiter, "/")
dialect = sniffer.sniff(self.sample4)
self.assertEqual(dialect.delimiter, ";")
dialect = sniffer.sniff(self.sample5)
self.assertEqual(dialect.delimiter, "\t")
dialect = sniffer.sniff(self.sample6)
self.assertEqual(dialect.delimiter, "|")
dialect = sniffer.sniff(self.sample7)
self.assertEqual(dialect.delimiter, "|")
self.assertEqual(dialect.quotechar, "'")
dialect = sniffer.sniff(self.sample8)
self.assertEqual(dialect.delimiter, '+')
dialect = sniffer.sniff(self.sample9)
self.assertEqual(dialect.delimiter, '+')
self.assertEqual(dialect.quotechar, "'")
def test_doublequote(self):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(self.header1)
self.assertFalse(dialect.doublequote)
dialect = sniffer.sniff(self.header2)
self.assertFalse(dialect.doublequote)
dialect = sniffer.sniff(self.sample2)
self.assertTrue(dialect.doublequote)
dialect = sniffer.sniff(self.sample8)
self.assertFalse(dialect.doublequote)
dialect = sniffer.sniff(self.sample9)
self.assertTrue(dialect.doublequote)
if not hasattr(sys, "gettotalrefcount"):
if support.verbose: print("*** skipping leakage tests ***")
else:
class NUL:
def write(s, *args):
pass
writelines = write
class TestLeaks(unittest.TestCase):
def test_create_read(self):
delta = 0
lastrc = sys.gettotalrefcount()
for i in range(20):
gc.collect()
self.assertEqual(gc.garbage, [])
rc = sys.gettotalrefcount()
csv.reader(["a,b,c\r\n"])
csv.reader(["a,b,c\r\n"])
csv.reader(["a,b,c\r\n"])
delta = rc-lastrc
lastrc = rc
# if csv.reader() leaks, last delta should be 3 or more
self.assertEqual(delta < 3, True)
def test_create_write(self):
delta = 0
lastrc = sys.gettotalrefcount()
s = NUL()
for i in range(20):
gc.collect()
self.assertEqual(gc.garbage, [])
rc = sys.gettotalrefcount()
csv.writer(s)
csv.writer(s)
csv.writer(s)
delta = rc-lastrc
lastrc = rc
# if csv.writer() leaks, last delta should be 3 or more
self.assertEqual(delta < 3, True)
def test_read(self):
delta = 0
rows = ["a,b,c\r\n"]*5
lastrc = sys.gettotalrefcount()
for i in range(20):
gc.collect()
self.assertEqual(gc.garbage, [])
rc = sys.gettotalrefcount()
rdr = csv.reader(rows)
for row in rdr:
pass
delta = rc-lastrc
lastrc = rc
# if reader leaks during read, delta should be 5 or more
self.assertEqual(delta < 5, True)
def test_write(self):
delta = 0
rows = [[1,2,3]]*5
s = NUL()
lastrc = sys.gettotalrefcount()
for i in range(20):
gc.collect()
self.assertEqual(gc.garbage, [])
rc = sys.gettotalrefcount()
writer = csv.writer(s)
for row in rows:
writer.writerow(row)
delta = rc-lastrc
lastrc = rc
# if writer leaks during write, last delta should be 5 or more
self.assertEqual(delta < 5, True)
class TestUnicode(unittest.TestCase):
names = ["Martin von Löwis",
"Marc André Lemburg",
"Guido van Rossum",
"François Pinard"]
def test_unicode_read(self):
import io
with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
fileobj.write(",".join(self.names) + "\r\n")
fileobj.seek(0)
reader = csv.reader(fileobj)
self.assertEqual(list(reader), [self.names])
def test_unicode_write(self):
import io
with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
writer = csv.writer(fileobj)
writer.writerow(self.names)
expected = ",".join(self.names)+"\r\n"
fileobj.seek(0)
self.assertEqual(fileobj.read(), expected)
def test_main():
mod = sys.modules[__name__]
support.run_unittest(
*[getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
)
if __name__ == '__main__':
test_main()
| bsd-3-clause | -3,903,981,953,445,689,300 | 38.26839 | 108 | 0.542907 | false |
tectronics/wfrog | wflogger/input/__init__.py | 5 | 1238 | ## Copyright 2009 Laurent Bovet <[email protected]>
## Jordi Puigsegur <[email protected]>
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml
import function
import stdio
import http
import atom
# YAML mappings
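# The classes below bind YAML tags to the wrapped input implementations so a
# wfrog configuration file can instantiate them directly. A hypothetical
# snippet (the element names are illustrative, not taken from the real schema):
#
#   input: !http-in
#     port: 8888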
class YamlFunctionInput(function.FunctionInput, yaml.YAMLObject):
yaml_tag = u'!function'
class YamlStdioInput(stdio.StdioInput, yaml.YAMLObject):
yaml_tag = u'!stdio-in'
class YamlHttpInput(http.HttpInput, yaml.YAMLObject):
yaml_tag = u'!http-in'
class YamlAtomInput(atom.AtomInput, yaml.YAMLObject):
    yaml_tag = u'!atom-in'
| gpl-3.0 | -6,566,565,377,472,703,000 | 31.605263 | 73 | 0.732633 | false |
SaptakS/open-event-orga-server | app/helpers/oauth.py | 3 | 4349 | from urlparse import urlparse
import oauth2
from flask import request
from app.settings import get_settings
class OAuth(object):
"""Google Credentials"""
AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
SCOPE = ['profile', 'email']
@classmethod
def get_client_id(cls):
return get_settings()['google_client_id']
@classmethod
def get_client_secret(cls):
return get_settings()['google_client_secret']
@classmethod
def get_redirect_uri(cls):
url = urlparse(request.url)
redirect_uri = url.scheme + '://' + url.netloc + '/gCallback'
return redirect_uri
@classmethod
def get_auth_uri(cls):
return cls.AUTH_URI
@classmethod
def get_token_uri(cls):
return cls.TOKEN_URI
@classmethod
def get_user_info(cls):
return cls.USER_INFO
class FbOAuth(object):
"""Facebook Credentials"""
Fb_AUTH_URI = 'https://www.facebook.com/dialog/oauth'
Fb_TOKEN_URI = 'https://graph.facebook.com/oauth/access_token'
Fb_USER_INFO = 'https://graph.facebook.com/me?fields=email,id,name,picture,last_name,first_name,link'
SCOPE = ['public_profile', 'email']
@classmethod
def get_client_id(cls):
return get_settings()['fb_client_id']
@classmethod
def get_client_secret(cls):
return get_settings()['fb_client_secret']
@classmethod
def get_redirect_uri(cls):
url = urlparse(request.url)
fb_redirect_uri = url.scheme + '://' + url.netloc + '/fCallback'
return fb_redirect_uri
@classmethod
def get_auth_uri(cls):
return cls.Fb_AUTH_URI
@classmethod
def get_token_uri(cls):
return cls.Fb_TOKEN_URI
@classmethod
def get_user_info(cls):
return cls.Fb_USER_INFO
class TwitterOAuth(object):
"""Facebook Credentials"""
TW_AUTH_URI = 'https://api.twitter.com/oauth/authorize'
TW_REQUEST_TOKEN_URI = 'https://api.twitter.com/oauth/request_token'
TW_ACCESS_TOKEN = "https://api.twitter.com/oauth/access_token?"
@classmethod
def get_client_id(cls):
return get_settings()['tw_consumer_key']
@classmethod
def get_client_secret(cls):
return get_settings()['tw_consumer_secret']
@classmethod
def get_redirect_uri(cls):
url = urlparse(request.url)
tw_redirect_uri = url.scheme + '://' + url.netloc + '/tCallback'
return tw_redirect_uri
def get_consumer(self):
return oauth2.Consumer(key=self.get_client_id(),
secret=self.get_client_secret())
def get_request_token(self):
client = oauth2.Client(self.get_consumer())
return client.request(self.TW_REQUEST_TOKEN_URI, "GET")
def get_access_token(self, oauth_verifier, oauth_token):
consumer = self.get_consumer()
client = oauth2.Client(consumer)
return client.request(
self.TW_ACCESS_TOKEN + 'oauth_verifier=' + oauth_verifier + "&oauth_token=" + oauth_token, "POST")
def get_authorized_client(self, oauth_verifier, oauth_token):
        import urlparse  # the module itself (the top-level import binds only the urlparse function); needed for parse_qsl
resp, content = self.get_access_token(oauth_verifier, oauth_token)
access_token = dict(urlparse.parse_qsl(content))
token = oauth2.Token(access_token["oauth_token"], access_token["oauth_token_secret"])
token.set_verifier(oauth_verifier)
return oauth2.Client(self.get_consumer(), token), access_token
class InstagramOAuth(object):
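    """Instagram Credentials"""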
INSTAGRAM_OAUTH_URI = "https://api.instagram.com/oauth/authorize/"
INSTAGRAM_TOKEN_URI = "https://api.instagram.com/oauth/access_token"
SCOPE = ['basic', 'public_content']
@classmethod
def get_client_id(cls):
return get_settings()['in_client_id']
@classmethod
def get_client_secret(cls):
return get_settings()['in_client_secret']
@classmethod
def get_redirect_uri(cls):
url = urlparse(request.url)
i_redirect_uri = url.scheme + '://' + url.netloc + '/iCallback'
return i_redirect_uri
@classmethod
def get_auth_uri(cls):
return cls.INSTAGRAM_OAUTH_URI
@classmethod
def get_token_uri(cls):
return cls.INSTAGRAM_TOKEN_URI
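# A minimal sketch of the three-legged Twitter flow built from the methods
# above (illustrative only; it assumes valid consumer credentials in settings
# and that oauth_token/oauth_verifier arrive via Twitter's callback redirect):
#
#   tw = TwitterOAuth()
#   resp, content = tw.get_request_token()                  # step 1
#   # step 2: redirect the user to TW_AUTH_URI with the request token
#   client, access_token = tw.get_authorized_client(oauth_verifier,
#                                                   oauth_token)  # step 3
#   # "client" can now sign Twitter API requests on the user's behalf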
| gpl-3.0 | -1,094,879,263,530,122,200 | 28.787671 | 110 | 0.638768 | false |
cprov/snapcraft | snapcraft/cli/env.py | 1 | 3773 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from distutils import util
from typing import List
from . import echo
from snapcraft.internal import errors
from snapcraft.formatting_utils import humanize_list
class BuilderEnvironmentConfig:
"""Handle the chosen build provider.
To determine the build environment, SNAPCRAFT_BUILD_ENVIRONMENT is
retrieved from the environment and used to determine the build
provider. If it is not set, a value of `host` is assumed.
Valid values are:
- host: the host will drive the build.
- lxd: the host will setup a container to drive the build.
- multipass: a vm driven by multipass will be created to drive the build.
Use of the lxd value is equivalent to setting the now deprecated
SNAPCRAFT_CONTAINER_BUILDS environment variable to a value that
would evaluate to True.
Setting this variable to a value that resolves to a non boolean
results in an error.
"""
def __init__(
self, *, default="host", additional_providers: List[str] = None
) -> None:
"""Instantiate a BuildEnvironmentConfig.
:param str default: the default provider to use among the list of valid
ones.
:param str additional_providers: Additional providers allowed in the
environment.
"""
valid_providers = ["host", "lxd"]
if additional_providers is not None:
valid_providers.extend(additional_providers)
use_lxd = None
container_builds = os.environ.get("SNAPCRAFT_CONTAINER_BUILDS")
if container_builds:
echo.warning(
"The flag SNAPCRAFT_CONTAINER_BUILDS has been deprecated. "
"Use SNAPCRAFT_BUILD_ENVIRONMENT=lxd instead."
)
try:
use_lxd = util.strtobool(container_builds)
except ValueError:
raise errors.SnapcraftEnvironmentError(
"The experimental feature of using non-local LXD remotes "
"with SNAPCRAFT_CONTAINER_BUILDS has been dropped."
)
build_provider = os.environ.get("SNAPCRAFT_BUILD_ENVIRONMENT")
if build_provider and use_lxd:
raise errors.SnapcraftEnvironmentError(
"SNAPCRAFT_BUILD_ENVIRONMENT and SNAPCRAFT_CONTAINER_BUILDS "
"cannot be used together.\n"
"Given that SNAPCRAFT_CONTAINER_BUILDS is deprecated, "
"unset that variable from the environment and try again."
)
if use_lxd:
build_provider = "lxd"
elif not build_provider:
build_provider = default
elif build_provider not in valid_providers:
raise errors.SnapcraftEnvironmentError(
"SNAPCRAFT_BUILD_ENVIRONMENT must be one of: {}.".format(
humanize_list(items=valid_providers, conjunction="or")
)
)
self.provider = build_provider
self.is_host = build_provider == "host"
self.is_lxd = build_provider == "lxd"
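# A minimal usage sketch (an assumption about calling code, not part of this
# module): CLI commands construct the config once and branch on the provider.
#
#   config = BuilderEnvironmentConfig(additional_providers=["multipass"])
#   if config.is_lxd:
#       pass  # drive the build inside an LXD container
#   elif config.is_host:
#       pass  # build directly on the host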
| gpl-3.0 | 7,436,541,643,309,643,000 | 38.302083 | 79 | 0.643255 | false |
shaz13/oppia | core/domain/rating_services.py | 10 | 5068 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System for assigning and displaying ratings of explorations."""
import datetime
from core.domain import event_services
from core.domain import exp_services
from core.platform import models
import feconf
(exp_models, user_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.user])
transaction_services = models.Registry.import_transaction_services()
ALLOWED_RATINGS = [1, 2, 3, 4, 5]
def assign_rating_to_exploration(user_id, exploration_id, new_rating):
"""Records the rating awarded by the user to the exploration in both the
user-specific data and exploration summary.
This function validates the exploration id but not the user id.
Args:
user_id: str. The id of the user assigning the rating.
exploration_id: str. The id of the exploration that is
assigned a rating.
new_rating: int. Value of assigned rating, should be between
1 and 5 inclusive.
"""
if not isinstance(new_rating, int):
raise ValueError(
'Expected the rating to be an integer, received %s' % new_rating)
if new_rating not in ALLOWED_RATINGS:
raise ValueError('Expected a rating 1-5, received %s.' % new_rating)
try:
exp_services.get_exploration_by_id(exploration_id)
    except Exception:
raise Exception('Invalid exploration id %s' % exploration_id)
def _update_user_rating():
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exp_user_data_model:
old_rating = exp_user_data_model.rating
else:
old_rating = None
exp_user_data_model = user_models.ExplorationUserDataModel.create(
user_id, exploration_id)
exp_user_data_model.rating = new_rating
exp_user_data_model.rated_on = datetime.datetime.utcnow()
exp_user_data_model.put()
return old_rating
old_rating = transaction_services.run_in_transaction(_update_user_rating)
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration_id)
if not exploration_summary.ratings:
exploration_summary.ratings = feconf.get_empty_ratings()
exploration_summary.ratings[str(new_rating)] += 1
if old_rating:
exploration_summary.ratings[str(old_rating)] -= 1
event_services.RateExplorationEventHandler.record(
exploration_id, user_id, new_rating, old_rating)
exploration_summary.scaled_average_rating = (
exp_services.get_scaled_average_rating(
exploration_summary.ratings))
exp_services.save_exploration_summary(exploration_summary)
def get_user_specific_rating_for_exploration(user_id, exploration_id):
"""Fetches a rating for the specified exploration from the specified user
if one exists.
Args:
user_id: str. The id of the user.
exploration_id: str. The id of the exploration.
Returns:
int or None. An integer between 1 and 5 inclusive, or None if the user
has not previously rated the exploration.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rating if exp_user_data_model else None
def get_when_exploration_rated(user_id, exploration_id):
"""Fetches the datetime the exploration was last rated by this user, or
None if no rating has been awarded.
Currently this function is only used for testing purposes.
Args:
user_id: str. The id of the user.
exploration_id: str. The id of the exploration.
Returns:
datetime.datetime or None. When the exploration was last
rated by the user, or None if the user has not previously
rated the exploration.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rated_on if exp_user_data_model else None
def get_overall_ratings_for_exploration(exploration_id):
"""Fetches all ratings for an exploration.
Args:
exploration_id: str. The id of the exploration.
Returns:
a dict whose keys are '1', '2', '3', '4', '5' and whose
values are nonnegative integers representing the frequency counts
of each rating.
"""
exp_summary = exp_services.get_exploration_summary_by_id(exploration_id)
return exp_summary.ratings
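# A minimal usage sketch with hypothetical ids (illustrative only; a real call
# needs an initialized datastore and an existing exploration):
#
#   assign_rating_to_exploration('user_1', 'exp_1', 4)
#   get_overall_ratings_for_exploration('exp_1')
#   # -> e.g. {'1': 0, '2': 0, '3': 0, '4': 1, '5': 0}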
| apache-2.0 | -4,749,925,435,926,295,000 | 35.2 | 78 | 0.692186 | false |
jrn223/Freestyle | app/Stock_market_data.py | 1 | 3248 | # for email functionality, credit @s2t2
import os
import sendgrid
from sendgrid.helpers.mail import * # source of Email, Content, Mail, etc.
# for day of week
import datetime
# to query Google stock data
from pandas_datareader import data
from datetime import date, timedelta
#for sorting biggest gains to biggest losses
import operator
stock_data = []
#Stock data for Apple, Amazon, Activision Blizzard, Hologic Inc, Ligand Pharmaceuticals Inc, Microsoft, Ferrari, T. Rowe Price, Tesla, Vivint Solar Inc
symbols = ['AAPL', 'AMZN', 'ATVI', 'HOLX', 'LGND', 'MSFT', 'RACE', 'TROW', 'TSLA', 'VSLR']
data_source = 'google'
day_of_week = datetime.datetime.today().weekday()
# print(day_of_week)  # debug check: weekday() returns 0 (Monday) through 6 (Sunday)
monday = [0]
other_weekdays = [1,2,3,4]
saturday = [5]
sunday = [6]
if day_of_week in monday:
start = str(date.today() - timedelta(days=3))
end = str(date.today())
elif day_of_week in other_weekdays:
start = str(date.today() - timedelta(days=1))
end = str(date.today())
elif day_of_week in saturday:
start = str(date.today() - timedelta(days=2))
end = str(date.today() - timedelta(days=1))
elif day_of_week in sunday:
start = str(date.today() - timedelta(days=3))
end = str(date.today() - timedelta(days=2))
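# The branching above maps each weekday to (start, end) day offsets. An
# equivalent table-driven form, shown only as a commented sketch:
#   offsets = {0: (3, 0), 1: (1, 0), 2: (1, 0), 3: (1, 0), 4: (1, 0),
#              5: (2, 1), 6: (3, 2)}
#   start_offset, end_offset = offsets[day_of_week]
#   start = str(date.today() - timedelta(days=start_offset))
#   end = str(date.today() - timedelta(days=end_offset))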
response = data.DataReader(symbols, data_source, start, end)
daily_closing_prices = response.ix["Close"]
# print(daily_closing_prices)  # debug check: inspect the closing-price table
def stock_data_builder (ticker_symbol):
stock = {}
stock["ticker"] = ticker_symbol
stock["today_close"] = daily_closing_prices.iloc[1][ticker_symbol]
stock["previous_day_close"] = daily_closing_prices.iloc[0][ticker_symbol]
stock["difference"] = stock["today_close"] - stock["previous_day_close"]
stock_data.append(stock)
for ticker in symbols:
stock_data_builder(ticker)
products_gain_loss_order = sorted(stock_data, key=lambda x: x["difference"], reverse=True)
# print(products_gain_loss_order)  # debug check: verify biggest-gain-to-biggest-loss order
print("{:<35} {:<35} {:<35} {:<35}".format("Ticker","Most Recent Closing Price","Previous Day's Closing Price", "Gain / Loss"))
email_chart = "{:<20} {:<37} {:<30} {:<40}".format("Ticker","Most Recent Closing Price","Previous Day's Closing Price", "Gain / Loss") + "\n"
for stock in products_gain_loss_order:
print("{:<35} {:<35} {:<35} {:<35}".format(stock["ticker"], stock["today_close"], stock["previous_day_close"], '{0:.2f}'.format(stock["difference"])))
email_chart = email_chart + "{:<20} {:<50} {:<43} {:<0}".format(stock["ticker"], stock["today_close"], stock["previous_day_close"], '{0:.2f}'.format(stock["difference"])) + "\n"
# AUTHENTICATE, credit @s2t2
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
sg = sendgrid.SendGridAPIClient(apikey = SENDGRID_API_KEY)
# COMPILE REQUEST PARAMETERS, credit @s2t2
subject = "Stock Market Updates!"
my_email = Email("[email protected]")
from_email = my_email
to_email = my_email
content = Content("text/plain", email_chart)
mail = Mail(from_email, subject, to_email, content)
# ISSUE REQUEST, credit @s2t2
response = sg.client.mail.send.post(request_body=mail.get())
# PARSE RESPONSE, credit @s2t2
print(response.status_code)
print(response.body)
print(response.headers)
| mit | 2,787,467,201,835,120,000 | 32.833333 | 181 | 0.689963 | false |
gonicus/gosa | backend/src/tests/backend/objects/comparator/test_acl_set.py | 1 | 4604 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import unittest
import unittest.mock
from gosa.backend.objects.comparator.acl_set import *
class AclSetComparatorTests(unittest.TestCase):
@unittest.mock.patch.object(PluginRegistry, 'getInstance')
def test_IsAclSet(self, mockedResolver):
resolver = unittest.mock.MagicMock(autoSpec=True, create=True)
resolver.getAclRoles.return_value = {"name":["role1","role2"]}
mockedResolver.return_value = resolver
comp = IsAclSet()
(result, errors) = comp.process(None, None, [{}])
assert result == False
assert len(errors) == 1
(result, errors) = comp.process(None, None, [{"priority":"high"}])
assert result == False
assert len(errors) == 1
(result, errors) = comp.process(None, None, [{"priority": "high", "members":[]}])
assert result == False
assert len(errors) == 1
# with rolename + actions
(result, errors) = comp.process(None, None, [{"priority": "high", "members": [], "rolename":"role1", "actions":["action"]}])
assert result == False
assert len(errors) == 1
        # wrong rolename type
(result, errors) = comp.process(None, None, [{"priority": "high", "members": [], "rolename": {"role1"}}])
assert result == False
assert len(errors) == 1
        # disallowed rolename
(result, errors) = comp.process(None, None, [{"priority": "high", "members": [], "rolename": "role3"}])
assert result == False
assert len(errors) == 1
# no rolename, scope missing
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "actions": ["action"]}])
assert result == False
assert len(errors) == 1
# no rolename, actions missing
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local"}])
assert result == False
assert len(errors) == 1
# no rolename, no topic in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local", "actions":{"acl":"acl", "options":{}}}])
assert result == False
assert len(errors) == 1
# no rolename, no acl in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic","options": {}}]}])
assert result == False
assert len(errors) == 1
# no rolename, wrong topic type in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": True, "acl": "crod", "options": {}}]}])
assert result == False
assert len(errors) == 1
# no rolename, wrong acl type in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic", "acl": True, "options": {}}]}])
assert result == False
assert len(errors) == 1
# no rolename, wrong acl content in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic", "acl": "asds", "options": {}}]}])
assert result == False
assert len(errors) == 1
# no rolename, unsupportted keys in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic", "acl": "crod", "options": {},"unsupported": True}]}])
assert result == False
assert len(errors) == 1
# no rolename, wrong options type in actions
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic", "acl": "crod", "options": []}]}])
assert result == False
assert len(errors) == 1
# finally a valid example
(result, errors) = comp.process(None, None, [
{"priority": "high", "members": [], "scope": "local",
"actions": [{"topic": "topic", "acl": "crod", "options": {}}]}])
assert result == True
assert len(errors) == 0 | lgpl-2.1 | 5,341,840,653,029,482,000 | 39.394737 | 132 | 0.54583 | false |
axelberndt/Raspberry-Pi-Tools | src/RotaryEncoder.py | 1 | 6002 | #!/usr/bin/env python3.5
# This Python script reads a rotary encoder connected to GPIO pin 16 and 18, GND on pin 14 or any other ground pin.
# Some encoders have inverse direction; in this case swap the values of GPIOpinA and GPIOpinB accordingly.
# This solution works without debouncing. However, very quick rotation may lead to missed state changes and, hence,
# a few results could be wrong.
# Furthermore, most users will not need the locking and can safely remove everything related to it (wherever the variable lock occurs).
# Put it in a location on your Pi, say /home/pi/myTools/, and enter the following line at the terminal.
# python /home/pi/myTools/RotaryEncoder.py&
# This will execute the script in the background and produce terminal output whenever the encoder rotates.
# To stop the script write
# ps -elf | grep python
# at the terminal and find the process id (PID) of the script. Then write this line and insert the right PID:
# kill -9 PID
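# (A shorter alternative, assuming no other running process matches the name: pkill -f RotaryEncoder.py)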
# Author: Axel Berndt
#
# Rotary encoder pulse
# +-------+ +-------+ 0
# A | | | |
# ------+ +-------+ +-- 1
# +-------+ +-------+ 0
# B | | | |
# --+ +-------+ +------ 1
from RPi import GPIO
from time import sleep
GPIOpinA = 23 # left pin of the rotary encoder is on GPIO 23 (Pi pin 16)
GPIOpinB = 24 # right pin of the rotary encoder is on GPIO 24 (Pi pin 18)
value = 0 # this value will be in-/decreased by rotating the encoder
lock = False # this is set True to prevent interference of multiple interrupt processings
aDown = False # this is set True to wait for GPIO A to go down
bUp = False # this is set True to wait for GPIO B to go up
bDown = False # this is set True to wait for GPIO B to go down
# initialize GPIO input and define interrupts
def init():
GPIO.setmode(GPIO.BCM) # set the GPIO naming/numbering convention to BCM
GPIO.setup(GPIOpinA, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel A
GPIO.setup(GPIOpinB, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel B
GPIO.add_event_detect(GPIOpinA, GPIO.BOTH, callback=rotaryInterruptA) # define interrupt for action on channel A (no bouncetime needed)
GPIO.add_event_detect(GPIOpinB, GPIO.BOTH, callback=rotaryInterruptB) # define interrupt for action on channel B (no bouncetime needed)
# the callback functions when turning the encoder
# this one reacts on action on channel A
def rotaryInterruptA(GPIOpin):
global lock, GPIOpinA, GPIOpinB # get access to some global variables
A = GPIO.input(GPIOpinA) # read current value of channel A
B = GPIO.input(GPIOpinB) # read current value of channel B
while lock: # while another interrupt is processing
pass # wait
lock = True # now, prevent other interrupts to interfere
global value, aDown, bUp, bDown # get access to some more global variables
if aDown: # if we are waiting for channel A to go down (to finish -> rotation cycle)
if not A: # check if it is down now
aDown = False # -> rotation cycle finished
elif bUp or bDown: # if a <- rotation cycle is unfinished so far
pass # don't do anything new
elif A: # if a new rotation cycle starts, i.e. nothing to go up or down
if B: # if B is already up, the rotation direction is ->
aDown = True # to finish the cycle, wait for A to go down again
value += 1 # increase our test output value
print("-> " + str(value)) # make terminal output
else: # if B still has to come up, the rotation direction is <-
bUp = True # in this rotation cycle B has to come up and down again, we start with waiting for B to come up
value -= 1 # decrease our test output value
print("<- " + str(value)) # make terminal output
lock = False # unlock, now other interrupts can do their job
return # done
# this callback function reacts on action on channel B
def rotaryInterruptB(GPIOpin):
global lock, GPIOpinB # get access to some global variables
B = GPIO.input(GPIOpinB) # read current value of channel B
while lock: # while another interrupt is processing
pass # wait
lock = True # now, prevent other interrupts to interfere
global bUp, bDown # get access to some more global variables
if B: # if B is up
if bUp: # and we have been waiting for B to come up (this is part of the <- rotation cycle)
bDown = True # wait for B to come down again
bUp = False # done with this
elif bDown: # B is down (if B: was False) and if we were waiting for B to come down
bDown = False # <- rotation cycle finished
lock = False # unlock, now other interrupts can do their job
return # done
# the main function
def main():
try: # run the program
init() # initialize everything
while True: # idle loop
sleep(300) # wakes up once every 5 minutes = 300 seconds
    except KeyboardInterrupt:
        pass                    # leave the idle loop on CTRL+C
    finally:
        GPIO.cleanup()          # clean up GPIO on any exit
# the entry point
if __name__ == '__main__':
main() | gpl-3.0 | 3,735,513,601,733,635,000 | 51.2 | 141 | 0.573476 | false |
j-griffith/cinder | cinder/objects/request_spec.py | 2 | 6127 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from oslo_versionedobjects import fields
from cinder import objects
from cinder.objects import base
@base.CinderObjectRegistry.register
class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
base.CinderComparableObject):
# Version 1.0: Initial version
# Version 1.1: Added group_id and group_backend
# Version 1.2 Added ``resource_backend``
# Version 1.3: Added backup_id
VERSION = '1.3'
fields = {
'consistencygroup_id': fields.UUIDField(nullable=True),
'group_id': fields.UUIDField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'image_id': fields.UUIDField(nullable=True),
'snapshot_id': fields.UUIDField(nullable=True),
'source_replicaid': fields.UUIDField(nullable=True),
'source_volid': fields.UUIDField(nullable=True),
'volume_id': fields.UUIDField(nullable=True),
'volume': fields.ObjectField('Volume', nullable=True),
'volume_type': fields.ObjectField('VolumeType', nullable=True),
'volume_properties': fields.ObjectField('VolumeProperties',
nullable=True),
'CG_backend': fields.StringField(nullable=True),
'group_backend': fields.StringField(nullable=True),
'resource_backend': fields.StringField(nullable=True),
'backup_id': fields.UUIDField(nullable=True),
}
obj_extra_fields = ['resource_properties']
@property
def resource_properties(self):
# TODO(dulek): This is to maintain compatibility with filters from
# oslo-incubator. As we've moved them into our codebase we should adapt
# them to use volume_properties and remove this shim.
return self.volume_properties
@classmethod
def from_primitives(cls, spec):
"""Returns RequestSpec object creating it from legacy dictionary.
FIXME(dulek): This should go away in early O as we stop supporting
backward compatibility with M.
"""
spec = spec.copy()
spec_obj = cls()
vol_props = spec.pop('volume_properties', {})
if vol_props is not None:
vol_props = VolumeProperties(**vol_props)
spec_obj.volume_properties = vol_props
if 'volume' in spec:
vol = spec.pop('volume', {})
vol.pop('name', None)
if vol is not None:
vol = objects.Volume(**vol)
spec_obj.volume = vol
if 'volume_type' in spec:
vol_type = spec.pop('volume_type', {})
if vol_type is not None:
vol_type = objects.VolumeType(**vol_type)
spec_obj.volume_type = vol_type
spec.pop('resource_properties', None)
for k, v in spec.items():
setattr(spec_obj, k, v)
return spec_obj
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with target version."""
super(RequestSpec, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
added_fields = (((1, 1), ('group_id', 'group_backend')),
                        ((1, 2), ('resource_backend',)))  # one-element tuple needs the trailing comma
for version, remove_fields in added_fields:
if target_version < version:
for obj_field in remove_fields:
primitive.pop(obj_field, None)
@base.CinderObjectRegistry.register
class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added group_id and group_type_id
VERSION = '1.1'
# TODO(dulek): We should add this to initially move volume_properites to
# ovo, but this should be removed as soon as possible. Most of the data
# here is already in request_spec and volume there. Outstanding ones would
# be reservation, and qos_specs. First one may be moved to request_spec and
# second added as relationship in volume_type field and whole
# volume_properties (and resource_properties) in request_spec won't be
# needed.
fields = {
'attach_status': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'consistencygroup_id': fields.UUIDField(nullable=True),
'group_id': fields.UUIDField(nullable=True),
'display_description': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'encryption_key_id': fields.UUIDField(nullable=True),
'metadata': fields.DictOfStringsField(nullable=True),
'multiattach': fields.BooleanField(nullable=True),
'project_id': fields.StringField(nullable=True),
'qos_specs': fields.DictOfStringsField(nullable=True),
'replication_status': fields.StringField(nullable=True),
'reservations': fields.ListOfStringsField(nullable=True),
'size': fields.IntegerField(nullable=True),
'snapshot_id': fields.UUIDField(nullable=True),
'source_replicaid': fields.UUIDField(nullable=True),
'source_volid': fields.UUIDField(nullable=True),
'status': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'volume_type_id': fields.UUIDField(nullable=True),
'group_type_id': fields.UUIDField(nullable=True),
}
| apache-2.0 | -8,157,374,361,758,151,000 | 41.846154 | 79 | 0.655296 | false |
astropy/astropy | astropy/nddata/mixins/ndslicing.py | 11 | 4359 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
from astropy.wcs.wcsapi import (BaseLowLevelWCS, BaseHighLevelWCS,
SlicedLowLevelWCS, HighLevelWCSWrapper)
__all__ = ['NDSlicingMixin']
class NDSlicingMixin:
"""Mixin to provide slicing on objects using the `NDData`
interface.
The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if
set and sliceable. The ``unit`` and ``meta`` will be untouched. The return
will be a reference and not a copy, if possible.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDSlicingMixin
>>> class NDDataSliceable(NDSlicingMixin, NDData):
... pass
Slicing an instance containing data::
>>> nd = NDDataSliceable([1,2,3,4,5])
>>> nd[1:3]
NDDataSliceable([2, 3])
Also the other attributes are sliced for example the ``mask``::
>>> import numpy as np
>>> mask = np.array([True, False, True, True, False])
>>> nd2 = NDDataSliceable(nd, mask=mask)
>>> nd2slc = nd2[1:3]
>>> nd2slc[nd2slc.mask]
NDDataSliceable([3])
Be aware that changing values of the sliced instance will change the values
of the original::
>>> nd3 = nd2[1:3]
>>> nd3.data[0] = 100
>>> nd2
NDDataSliceable([ 1, 100, 3, 4, 5])
See also
--------
NDDataRef
NDDataArray
"""
def __getitem__(self, item):
# Abort slicing if the data is a single scalar.
if self.data.shape == ():
raise TypeError('scalars cannot be sliced.')
# Let the other methods handle slicing.
kwargs = self._slice(item)
return self.__class__(**kwargs)
def _slice(self, item):
"""Collects the sliced attributes and passes them back as `dict`.
It passes uncertainty, mask and wcs to their appropriate ``_slice_*``
method, while ``meta`` and ``unit`` are simply taken from the original.
The data is assumed to be sliceable and is sliced directly.
When possible the return should *not* be a copy of the data but a
reference.
Parameters
----------
item : slice
The slice passed to ``__getitem__``.
Returns
-------
dict :
Containing all the attributes after slicing - ready to
use them to create ``self.__class__.__init__(**kwargs)`` in
``__getitem__``.
"""
kwargs = {}
kwargs['data'] = self.data[item]
# Try to slice some attributes
kwargs['uncertainty'] = self._slice_uncertainty(item)
kwargs['mask'] = self._slice_mask(item)
kwargs['wcs'] = self._slice_wcs(item)
# Attributes which are copied and not intended to be sliced
kwargs['unit'] = self.unit
kwargs['meta'] = self.meta
return kwargs
def _slice_uncertainty(self, item):
if self.uncertainty is None:
return None
try:
return self.uncertainty[item]
except TypeError:
# Catching TypeError in case the object has no __getitem__ method.
# But let IndexError raise.
log.info("uncertainty cannot be sliced.")
return self.uncertainty
def _slice_mask(self, item):
if self.mask is None:
return None
try:
return self.mask[item]
except TypeError:
log.info("mask cannot be sliced.")
return self.mask
def _slice_wcs(self, item):
if self.wcs is None:
return None
try:
llwcs = SlicedLowLevelWCS(self.wcs.low_level_wcs, item)
return HighLevelWCSWrapper(llwcs)
except Exception as err:
self._handle_wcs_slicing_error(err, item)
# Implement this in a method to allow subclasses to customise the error.
def _handle_wcs_slicing_error(self, err, item):
raise ValueError(f"Slicing the WCS object with the slice '{item}' "
"failed, if you want to slice the NDData object without the WCS, you "
"can remove by setting `NDData.wcs = None` and then retry.") from err
| bsd-3-clause | -2,427,317,883,012,298,000 | 32.022727 | 79 | 0.586144 | false |
bioinfo-core-BGU/neatseq-flow | bin/neatseq_flow.py | 1 | 5716 | #!/usr/bin/env python3
""" Create the pipeline scripts
This script inits the 'NeatSeq-Flow' class that does all the script creating
"""
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
__affiliation__ = "Bioinformatics Core Unit, NIBN, Ben Gurion University"
import os
import sys
import shutil
import argparse
# Remove bin from search path:
sys.path.pop(0)
# Append neatseq_flow path to list (when using installed version, will find it before getting to this search path)
# Problem that might arrise: When trying to run a local copy when it is installed in site-packages/
sys.path.append(os.path.realpath(os.path.expanduser(os.path.dirname(os.path.abspath(__file__))+os.sep+"..")))
from neatseq_flow.PLC_main import NeatSeqFlow
# Parse arguments:
parser = argparse.ArgumentParser(description="""
This program creates a set of scripts to perform a workflow.
The samples are defined in the --sample_file, the workflow itself in the --param_file.
""",
epilog="""
Author: Menachem Sklarz, NIBN
""")
parser.add_argument("-s", "--sample_file", help="Location of sample file, in classic or tabular format")
parser.add_argument("-p", "--param_file", help="Location of parameter file. Can be a comma-separated list - all will "
"be used as one. Alternatively, -p can be passed many times with "
"different param files", action="append")
parser.add_argument("-g", "--mapping", help="Location of mapping (or grouping) file. A tab-separated table describing "
"the samples and their properties.", action="append")
parser.add_argument("-d", "--home_dir", help="Location of workflow. Default is currect directory", default=os.getcwd())
parser.add_argument("-m", "--message", help="A message describing the workflow", default="")
parser.add_argument("-r", "--runid", help="Don't create new run ID. Use this one.", default="")
# parser.add_argument("-c","--convert2yaml", help="Convert parameter file to yaml format?", action='store_true')
parser.add_argument("-l", "--clean", help="Remove old workflow directories except for 'data'", action='store_true')
parser.add_argument("--clean-all", help="Remove all old workflow directories", action='store_true')
parser.add_argument("--delete", help="Delete all NeatSeq-Flow folders in workflow directory (see --home_dir)", action='store_true')
parser.add_argument("-V", "--verbose", help="Print admonitions?", action='store_true')
parser.add_argument("-v", "--version", help="Print version and exit.", action='store_true')
parser.add_argument("--list_modules", help="List modules available in modules_paths.", action='store_true')
args = parser.parse_args()
# Showing version and leaving, if required
if args.version:
print("NeatSeq-Flow version %s" % __version__)
print("Installation location: %s" % os.path.dirname(os.path.realpath(__file__)))
sys.exit()
# Deleting all dirs and leaving, if required
if args.delete:
text = input("Are you sure you want to delete the workflow in {home_dir}?\n('yes' to approve) > ".format(home_dir = args.home_dir))
if not text.lower() == "yes":
sys.exit()
from shutil import rmtree
for dir2del in "scripts data objects stderr stdout logs backups".split(" "):
path2del = "{home}{sep}{dir}".format(home=args.home_dir,
sep=os.sep,
dir=dir2del)
sys.stdout.write("Deleting {path}\n".format(path=path2del))
rmtree(path2del)
sys.stdout.write("Removed all NeatSeq-Flow directories in \n" + args.home_dir)
sys.exit()
# Testing sample and parameter files were passed
if args.sample_file is None or args.param_file is None:
print("Please supply sample and parameter files...\n")
parser.print_help()
sys.exit()
# Cleaning
if args.clean:
text = input("Are you sure you want to delete the workflow in {home_dir}?\n('yes' to approve) > ".format(home_dir = args.home_dir))
if not text.lower() == "yes":
sys.exit()
if args.clean_all:
text = input("Are you sure you want to delete '{data}'?\n('yes' to approve) > ".format(data=os.sep.join([args.home_dir, "data"])))
if os.path.isdir(os.sep.join([args.home_dir, "data"])):
if text.lower() == "yes":
shutil.rmtree(os.sep.join([args.home_dir, "data"]))
else:
print("Not removing 'data'")
for wfdir in ["backups", "logs", "objects", "scripts", "stderr", "stdout"]:
if os.path.isdir(os.sep.join([args.home_dir, wfdir])):
shutil.rmtree(os.sep.join([args.home_dir, wfdir]))
else:
if args.clean_all:
sys.exit("Please pass -l as well as --clean-all. This is a safety precaution...")
# Converting list of parameter files into comma-separated list. This is deciphered by the neatseq_flow class.
args.param_file = ",".join(args.param_file)
try:
NeatSeqFlow(sample_file = args.sample_file,
param_file = args.param_file,
grouping_file = args.mapping,
home_dir = args.home_dir,
message = args.message,
runid = args.runid,
verbose = args.verbose,
list_modules = args.list_modules)
except SystemExit:
pass | gpl-3.0 | 506,171,317,172,829,760 | 45.479675 | 138 | 0.638034 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/vtkh/package.py | 3 | 4255 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Vtkh(Package):
"""VTK-h is a toolkit of scientific visualization algorithms for emerging
processor architectures. VTK-h brings together several projects like VTK-m
and DIY2 to provide a toolkit with hybrid parallel capabilities."""
homepage = "https://github.com/Alpine-DAV/vtk-h"
url = "https://github.com/Alpine-DAV/vtk-h"
version('master',
git='https://github.com/Alpine-DAV/vtk-h.git',
branch='master',
submodules=True)
maintainers = ['cyrush']
variant("mpi", default=True, description="build mpi support")
variant("tbb", default=True, description="build tbb support")
variant("cuda", default=False, description="build cuda support")
depends_on("cmake")
depends_on("mpi", when="+mpi")
depends_on("tbb", when="+tbb")
depends_on("cuda", when="+cuda")
depends_on("vtkm@master")
depends_on("vtkm@master+tbb", when="+tbb")
depends_on("vtkm@master+cuda", when="+cuda")
def install(self, spec, prefix):
with working_dir('spack-build', create=True):
cmake_args = ["../src",
"-DVTKM_DIR={0}".format(spec["vtkm"].prefix),
"-DENABLE_TESTS=OFF",
"-DBUILD_TESTING=OFF"]
# mpi support
if "+mpi" in spec:
mpicc = spec['mpi'].mpicc
mpicxx = spec['mpi'].mpicxx
cmake_args.extend(["-DMPI_C_COMPILER={0}".format(mpicc),
"-DMPI_CXX_COMPILER={0}".format(mpicxx)])
mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
if os.path.isfile(mpiexe_bin):
cmake_args.append("-DMPIEXEC={0}".format(mpiexe_bin))
# tbb support
if "+tbb" in spec:
cmake_args.append("-DTBB_DIR={0}".format(spec["tbb"].prefix))
# cuda support
if "+cuda" in spec:
cmake_args.append("-DENABLE_CUDA=ON")
                # this fix is necessary if the compiling platform has cuda, but
                # no devices (this is common for front-end nodes on hpc clusters)
                # we choose kepler as the lowest common denominator
cmake_args.append("-DVTKm_CUDA_Architecture=kepler")
# use release, instead of release with debug symbols b/c vtkh libs
# can overwhelm compilers with too many symbols
            for arg in std_cmake_args:
                if arg.count("CMAKE_BUILD_TYPE") == 0:
                    cmake_args.append(arg)
cmake_args.append("-DCMAKE_BUILD_TYPE=Release")
cmake(*cmake_args)
if "+cuda" in spec:
# avoid issues with make -j and FindCuda deps
# likely a ordering issue that needs to be resolved
# in vtk-h
make(parallel=False)
else:
make()
make("install")
| lgpl-2.1 | 5,576,999,453,130,666,000 | 41.55 | 78 | 0.584724 | false |
adlius/osf.io | addons/box/apps.py | 14 | 1027 | from addons.base.apps import BaseAddonAppConfig, generic_root_folder
from addons.box.settings import MAX_UPLOAD_SIZE
box_root_folder = generic_root_folder('box')
class BoxAddonAppConfig(BaseAddonAppConfig):
name = 'addons.box'
label = 'addons_box'
full_name = 'Box'
short_name = 'box'
owners = ['user', 'node']
configs = ['accounts', 'node']
categories = ['storage']
has_hgrid_files = True
max_file_size = MAX_UPLOAD_SIZE
@property
def get_hgrid_data(self):
return box_root_folder
FOLDER_SELECTED = 'box_folder_selected'
NODE_AUTHORIZED = 'box_node_authorized'
NODE_DEAUTHORIZED = 'box_node_deauthorized'
actions = (FOLDER_SELECTED, NODE_AUTHORIZED, NODE_DEAUTHORIZED, )
@property
def routes(self):
from . import routes
return [routes.api_routes]
@property
def user_settings(self):
return self.get_model('UserSettings')
@property
def node_settings(self):
return self.get_model('NodeSettings')
| apache-2.0 | -3,704,457,776,115,016,700 | 24.675 | 69 | 0.661149 | false |
SeverTopan/AdjSim | adjsim/core.py | 1 | 15436 | """Core adjsim module.
This module contains the core features of the ABM engine, specifically, the Simulation and Agent objects
and the facilities to allow them to interact with one another.
Designed and developed by Sever Topan.
"""
# Standard.
import random
import time
import sys
import uuid
import copy
# Third party.
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
# Local.
from . import utility
from . import analysis
from . import visual
from . import decision
from . import color
from . import index
from . import callback
class _ActionSuite(utility.InheritableDict):
"""Container for Actions. May only store callables.
This object behaves through the same interface as a python
dictionary.
"""
def __setitem__(self, key, value):
"""Adds an item to the action suite."""
if not callable(value):
raise utility.ActionException
self._data[key] = value
class Agent(object):
"""The base Agent class.
All agents added to a simulation must be derived from this.
Attributes:
actions (_ActionSuite): The _ActionSuite container that holds all actions.
decision (decision.Decision): The decision object that the agent will use to determine action invocation.
order (int): The order in which this agent will take its step relative to others. Equal orders result in
no step order guarantee.
step_complete (bool): whether or not the agent has completed its step.
"""
def __init__(self):
self.actions = _ActionSuite()
self.decision = decision.NoCastDecision()
self.order = 0
self.step_complete = False
self._exists = True
self._id = uuid.uuid4()
@property
def id(self):
""" uuid: A unique identifier for the agent. Read-only."""
return self._id
class SpatialAgent(Agent):
"""The Spatial Agent class.
Builds upon Agent to incorporate 2d spatial coordinates representing the agent's position.
Any agent that desires to have the movement callback invoked when position is changed should
inherit from this class.
"""
DEFAULT_POS = np.array([0, 0])
def __init__(self, pos=DEFAULT_POS):
super().__init__()
self._pos = None
self._movement_callback = None
# Go through setter so that we do proper type checks.
self.pos = pos
@property
def pos(self):
"""np.ndarray: Obtains agent position. The returned array is NOT writeable."""
return self._pos
@pos.setter
def pos(self, value):
# assert pos type
if not type(value) == np.ndarray or value.shape != (2,):
raise TypeError
# Make immutable so that we have control over the agent movement callback.
value.flags.writeable = False
self._pos = value
# Trigger callback.
# This will always be non-None if the agent has been added to a simulation.
if self._movement_callback is not None:
self._movement_callback(self)
@property
def x(self):
"""int: Obtains agent's x-coordinate."""
return self.pos[0]
@x.setter
def x(self, value):
self.pos = np.array([value, self.y])
@property
def y(self):
"""int: Obtains agent's y-coordinate."""
return self.pos[1]
@y.setter
def y(self, value):
self.pos = np.array([self.x, value])
class VisualAgent(SpatialAgent):
"""The Visual Agent class.
Builds upon SpatialAgent to allow for agents to be visualized when simulated with a VisualSimulation.
Visual agents appear as circles with visual properties delineated by this class's attributes.
Attributes:
size (int): The size of the visualized agent.
color (QtGui.QColor): The color of the visualized agent.
style (QtCore.Qt.Pattern): The pattern of the visualized agent.
"""
DEFAULT_SIZE = 10
DEFAULT_COLOR = color.BLUE_DARK
DEFAULT_STYLE = QtCore.Qt.SolidPattern
def __init__(self, pos=SpatialAgent.DEFAULT_POS, size=DEFAULT_SIZE, color=DEFAULT_COLOR,
style=DEFAULT_STYLE):
super().__init__(pos)
self.size = size
self.color = color
self.style = style
class _AgentSuite(utility.InheritableSet):
"""Container for agents. May only store objects derived from Agent.
This object behaves through the same interface as a python set.
One additional function is provided to obtain a set copy for use in visualization.
Attributes:
callback_suite (_CallbackSuite): Reference to the simulation callback suite.
"""
def __init__(self, callback_suite):
super().__init__()
# Store references for callbacks
self.callback_suite = callback_suite
def add(self, agent):
"""Adds an item to the agent suite."""
if not issubclass(type(agent), Agent):
raise utility.InvalidAgentException
# Add agent.
self._data.add(agent)
# Register movement callback.
if issubclass(type(agent), SpatialAgent):
agent._movement_callback = self.callback_suite.agent_moved
# Trigger addition callback.
self.callback_suite.agent_added(agent)
def discard(self, value):
"""Discards an item to the agent suite."""
# 'Euthanize' and remove agent.
value._exists = False
value.step_complete = True
return_val = self._data.discard(value)
# Trigger callbacks.
self.callback_suite.agent_removed(value)
return return_val
def visual_snapshot(self):
"""Obtains a copy of the agent suite for visualization.
Returns:
A set of VisualAgent objects.
"""
return_set = set()
for agent in self._data:
if issubclass(type(agent), VisualAgent):
visual_copy = VisualAgent(pos=copy.copy(agent.pos), size=copy.copy(agent.size),
color=copy.copy(agent.color), style=copy.copy(agent.style))
visual_copy._id = copy.copy(agent.id)
return_set.add(visual_copy)
return return_set
class _TrackerSuite(utility.InheritableDict):
"""Container for trackers. May only store objects derived from Tracker.
This object behaves through the same interface as a python dictionary.
"""
def __setitem__(self, key, value):
"""Adds an item to the tracker suite."""
try:
assert issubclass(type(value), analysis.Tracker)
except:
raise utility.TrackerException
self._data[key] = value
class _CallbackSuite(object):
"""Container for callbacks.
Attributes:
agent_added (callback.AgentChangedCallback): Fires when an Agent is added to the agent set.
agent_removed (callback.AgentChangedCallback): Fires when an Agent is removed from the agent set.
agent_moved (callback.AgentChangedCallback): Fires when a SpatialAgent's pos attribute is set.
simulation_step_started (callback.SimulationMilestoneCallback): Fires when a Simulation step is started.
simulation_step_complete (callback.SimulationMilestoneCallback): Fires when a Simulation step is ended.
simulation_started (callback.SimulationMilestoneCallback): Fires when the Simulation starts.
simulation_complete (callback.SimulationMilestoneCallback): Fires when the Simulation ends.
"""
def __init__(self):
# Agent callbacks.
self.agent_added = callback.AgentChangedCallback()
self.agent_removed = callback.AgentChangedCallback()
self.agent_moved = callback.AgentChangedCallback()
        # Simulation milestone callbacks.
        self.simulation_step_started = callback.SimulationMilestoneCallback()
self.simulation_step_complete = callback.SimulationMilestoneCallback()
self.simulation_started = callback.SimulationMilestoneCallback()
self.simulation_complete = callback.SimulationMilestoneCallback()
class _IndexSuite(object):
"""Container for indidces.
"""
def __init__(self, simulation):
self._grid = index.GridIndex(simulation)
@property
def grid(self):
"""Obtain the grid index."""
return self._grid
class Simulation(object):
"""The base Simulation object.
This is the core object that is used to run adjsim simulations.
Attributes:
callbacks (_CallbackSuite): The Simulation's callbacks.
agents (_AgentSuite): The Simulation's agents.
trackers (_TrackerSuite): The Simulation's trackers.
indices (_IndexSuite): The Simulation's indices.
end_condition (callable): The Simulation's end condition.
time (int): The current Simulation time. Reflects step count.
"""
def __init__(self):
self.callbacks = _CallbackSuite()
self.agents = _AgentSuite(self.callbacks)
self.trackers = _TrackerSuite()
self.indices = _IndexSuite(self)
self.end_condition = None
self.time = 0
self._prev_print_str_len = 0
self._running = False
def start(self):
"""Starts a simulation instance.
Note:
This must be called before a call to the step function. This function triggers the
simulation_started callback.
"""
        if self._running:
raise Exception("Simulation already started.")
self._running = True
self.time = 0
# Call milestone callback.
self.callbacks.simulation_started(self)
def end(self):
"""Ends a simulation instance.
Note:
            This function triggers the simulation_complete callback.
"""
        if not self._running:
raise Exception("Simulation already ended.")
self._running = False
# Print a new line for prettier formatting.
print()
# Call milestone callback.
self.callbacks.simulation_complete(self)
def _step_single(self):
"""Performs a single simulation step.
This is where one iteration of the ABM loop occurs.
"""
        # Perform setup if needed.
if self.time == 0:
self._track()
# Call milestone callback.
self.callbacks.simulation_step_started(self)
# Iterate through agents in sorted order
for agent in sorted(self.agents, key=lambda a: a.order):
# Check if agent has been removed in previous iteration
if not agent._exists:
continue
# Delegate action casting to decision module.
try:
agent.decision(self, agent)
except:
raise utility.DecisionException
agent.step_complete = False
self.time += 1
self._track()
# Call milestone callback.
self.callbacks.simulation_step_complete(self)
def step(self, num_timesteps=1):
"""Performs a given number of simulation steps.
Note:
            This is where the ABM loop occurs. This function must be called after the simulation has been started.
Args:
num_timesteps (int): the number of timesteps to simulate.
"""
# Check running status.
if not self._running:
raise utility.SimulatonWorkflowException()
# Simulate.
for i in range(num_timesteps):
self._print_simulation_status(i + 1, num_timesteps)
self._step_single()
# Check end condition.
if self.end_condition is not None:
try:
if self.end_condition(self):
break
except:
raise utility.EndConditionException
def simulate(self, num_timesteps):
"""Performs a given number of simulation steps while handling simulation start/end.
Note:
            This convenience method simply calls start, step(num_timesteps), and end.
Args:
num_timesteps (int): the number of timesteps to simulate.
"""
self.start()
self.step(num_timesteps)
self.end()
def _track(self):
"""Calls the Simulation's trackers."""
try:
for tracker in self.trackers.values():
tracker(self)
except:
raise utility.TrackerException
def _print_simulation_status(self, timestep, num_timesteps):
"""Prints the simulation status at a given timestep.
Args:
timestep (int): The current timestep.
num_timesteps (int): The total number of timesteps.
"""
# Flush previous message.
sys.stdout.write("\r" + " " * self._prev_print_str_len)
sys.stdout.flush()
# Print new timestep string.
print_str = "\rSimulating timestep {}/{} - population: {}".format(timestep, num_timesteps, len(self.agents))
sys.stdout.write(print_str)
sys.stdout.flush()
self._prev_print_str_len = len(print_str)
class VisualSimulation(Simulation):
"""The Visual Simulation object.
This derivation of the Simulation object uses PyQt5 to render a visual representation
of an active simulation.
"""
def __init__(self):
super().__init__()
self._setup_required = True
self._wait_on_visual_init = 1
def _super_step(self, num_timesteps):
"""Calls the step method on Simulation base.
Args:
num_timesteps (int): The number of timesteps to simulate.
"""
super().step(num_timesteps)
def _step_single(self):
"""Visual implementation of single step."""
# Paint initial frame.
if self._setup_required:
self._setup_required = False
self._visual_thread.update_signal.emit(self.agents.visual_snapshot())
time.sleep(self._wait_on_visual_init)
super()._step_single()
        # Wait for animation.
self._visual_thread.update_semaphore.acquire(1)
self._visual_thread.update_signal.emit(self.agents.visual_snapshot())
def step(self, num_timesteps=1):
"""Perform a given number of visual simulation steps.
Args:
num_timesteps (int): The number of timesteps to simulate.
"""
# Check running status.
if not self._running:
raise utility.SimulatonWorkflowException()
# Perform threading initialization for graphics.
self._setup_required = True
self._update_semaphore = QtCore.QSemaphore(0)
self._q_app = QtWidgets.QApplication([])
self._view = visual.AdjGraphicsView(self._q_app.desktop().screenGeometry(), self._update_semaphore)
self._visual_thread = visual.AdjThread(self._q_app, self, num_timesteps)
self._visual_thread.finished.connect(self._q_app.exit)
self._visual_thread.update_signal.connect(self._view.update)
# Begin simulation.
self._visual_thread.start()
self._q_app.exec_()
# Cleanup variables.
self._visual_thread.quit()
del self._visual_thread
del self._view
del self._q_app
del self._update_semaphore
| gpl-3.0 | 2,943,923,704,895,294,500 | 30.183838 | 116 | 0.61985 | false |
popazerty/gui-test | lib/python/Plugins/Plugin.py | 21 | 3836 | from Components.config import ConfigSubsection, config
import os
config.plugins = ConfigSubsection()
class PluginDescriptor:
"""An object to describe a plugin."""
# where to list the plugin. Note that there are different call arguments,
# so you might not be able to combine them.
# supported arguments are:
# session
# servicereference
# reason
# you have to ignore unknown kwargs!
# argument: session
WHERE_EXTENSIONSMENU = 1
WHERE_MAINMENU = 2
WHERE_PLUGINMENU = 3
# argument: session, serviceref (currently selected)
WHERE_MOVIELIST = 4
# argument: menuid. Fnc must return list with menuitems (4-tuple of name, fnc to call, entryid or None, weight or None)
WHERE_MENU = 5
# reason (0: start, 1: end)
WHERE_AUTOSTART = 6
# start as wizard. In that case, fnc must be tuple (priority,class) with class being a screen class!
WHERE_WIZARD = 7
# like autostart, but for a session. currently, only session starts are
# delivered, and only on pre-loaded plugins
WHERE_SESSIONSTART = 8
# start as teletext plugin. arguments: session, serviceref
WHERE_TELETEXT = 9
# file-scanner, fnc must return a list of Scanners
WHERE_FILESCAN = 10
# fnc must take an interface name as parameter and return None if the plugin supports an extended setup
# or return a function which is called with session and the interface name for extended setup of this interface
WHERE_NETWORKSETUP = 11
	# show up this plugin (or a choicebox with all of them) for long INFO keypress
WHERE_EVENTINFO = 12
# reason (True: Networkconfig read finished, False: Networkconfig reload initiated )
WHERE_NETWORKCONFIG_READ = 13
WHERE_AUDIOMENU = 14
# fnc 'SoftwareSupported' or 'AdvancedSoftwareSupported' must take a parameter and return None
# if the plugin should not be displayed inside Softwaremanger or return a function which is called with session
# and 'None' as parameter to call the plugin from the Softwaremanager menus. "menuEntryName" and "menuEntryDescription"
# should be provided to name and describe the new menu entry.
WHERE_SOFTWAREMANAGER = 15
# fnc must take an interface name as parameter and return None if the plugin supports an extended setup
# or return a function which is called with session and the interface name for extended setup of this interface
WHERE_NETWORKMOUNTS = 16
WHERE_VIXMENU = 17
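	# Example descriptor for a plugin-menu entry (illustrative only; "main" is
	# a hypothetical entry function, not part of this module):
	#   def main(session, **kwargs):
	#       ...
	#   PluginDescriptor(name="Demo", description="Demo plugin",
	#                    where=PluginDescriptor.WHERE_PLUGINMENU, fnc=main)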
def __init__(self, name="Plugin", where=None, description="", icon=None, fnc=None, wakeupfnc=None, needsRestart=None, internal=False, weight=0):
if not where: where = []
self.name = name
self.internal = internal
self.needsRestart = needsRestart
self.path = None
if isinstance(where, list):
self.where = where
else:
self.where = [ where ]
self.description = description
if icon is None or isinstance(icon, str):
self.iconstr = icon
self._icon = None
else:
self.iconstr = None
self._icon = icon
self.weight = weight
self.wakeupfnc = wakeupfnc
self.__call__ = fnc
def updateIcon(self, path):
self.path = path
def getWakeupTime(self):
return self.wakeupfnc and self.wakeupfnc() or -1
@property
def icon(self):
if self.iconstr:
from Tools.LoadPixmap import LoadPixmap
return LoadPixmap(os.path.join(self.path, self.iconstr))
else:
return self._icon
def __eq__(self, other):
return self.__call__ == other.__call__
def __ne__(self, other):
return self.__call__ != other.__call__
def __lt__(self, other):
if self.weight < other.weight:
return True
elif self.weight == other.weight:
return self.name < other.name
else:
return False
def __gt__(self, other):
return other<self
def __ge__(self, other):
return not self<other
def __le__(self, other):
return not other<self
| gpl-2.0 | 5,470,608,534,162,150,000 | 28.736434 | 145 | 0.725756 | false |
ddaygold/music-controller | musicctl.py | 1 | 2623 | import imaplib
import smtplib
import sys
import email
import email.message as emailmessage
import subprocess
import alsaaudio
import logging
import argparse
import datetime
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c','--credfile',required=True)
args = parser.parse_args()
'''
The credfile is where the login details are stored for use by the script. Obviously they should not be in the
git repo ;)
The credfile is a simple text file with a field per line in this order:
username
password (cleartext)
email address to notify of muting
imap server address
imap server port
smtp server address
smtp server port
logfile path
'''
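    # Example credfile contents (hypothetical values, one field per line):
    #   musicbot
    #   s3cr3tpassword
    #   alerts@example.com
    #   imap.example.com
    #   993
    #   smtp.example.com
    #   465
    #   /var/log/musicctl.log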
credfile = open(args.credfile,'r')
USER,PASS,MAILING_LIST,IMAP_SERVER,IMAP_PORT_STRING,SMTP_SERVER,SMTP_PORT_STRING,LOG_FILE = [x.strip() for x in credfile.readlines()]
IMAP_PORT = int(IMAP_PORT_STRING)
SMTP_PORT = int(SMTP_PORT_STRING)
#logging block
logger = logging.getLogger('music-controller')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('Starting up')
    # This is the default audio device, which might not work for other people
m = alsaaudio.Mixer(control='Master',id=0,cardindex=0)
sender =smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT)
sender.login(USER,PASS)
recv = imaplib.IMAP4_SSL(IMAP_SERVER,IMAP_PORT)
recv.login(USER,PASS)
recv.select()
typ, data = recv.search(None, 'ALL')
data_set = set(int(x) for x in data[0].split())
for target in data_set:
typ, data = recv.fetch(target, '(RFC822)')
mail = data[0][1]
message = email.message_from_string(mail)
subject = message['Subject'].strip()
logger.info('working on '+subject)
author = message['From']
if subject.startswith('MUTE'):
logger.info('Found a mute command, muting')
m.setmute(1)
msg = emailmessage.Message()
msg['To'] = MAILING_LIST
msg['From'] = USER
msg['Subject'] = str(datetime.datetime.now()) + ' Muting'
sender.sendmail(USER,MAILING_LIST,str(msg))
logger.info('Deleting '+subject)
recv.store(target,'+FLAGS','\\Deleted')
recv.expunge()
sender.quit()
if __name__ == "__main__":
main()
| gpl-2.0 | -2,362,707,196,964,738,600 | 30.60241 | 137 | 0.635532 | false |
bioinfo-core-BGU/neatseq-flow | neatseq_flow/step_classes/mapping/samtools_old.py | 2 | 17385 | # -*- coding: UTF-8 -*-
"""
``samtools_old`` :sup:`*`
-----------------------------------------------------------------
:Authors: Menachem Sklarz
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
A class that defines a module for executing samtools on a SAM or BAM file.
.. attention:: The module was tested on samtools 1.3
The samtools programs included in the module are the following:
* ``view`` to convert the SAM file to a BAM file
* ``sort`` to sort the BAM file
* ``index`` creates an index for the BAM file
* ``flagstat`` Runs flagstat on the BAM file
* ``stats`` Runs stats on the BAM file
* ``idxstats`` Runs idxstats on the BAM file
* ``fastq/a`` Converts a BAM or CRAM into either FASTQ or FASTA format depending on the command invoked.
* ``merge`` Merges sample bam files into single project bam file.
.. Note:: Order of samtools subprogram execution:
The ``samtools`` programs are executed in the order above. It is up to you to have a sensible combination...
Requires
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* A SAM file in the following location:
* ``sample_data[<sample>]["sam"]`` (for ``scope=sample``)
* ``sample_data["sam"]`` (for ``scope=project``)
* Or a BAM file in:
* ``sample_data[<sample>]["bam"]`` (for ``scope=sample``)
* ``sample_data["bam"]`` (for ``scope=project``)
Output
~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Depending on the parameters, will put files in the following locations:
* ``sample_data[<sample>]["bam"]``
* ``sample_data[<sample>]["bai"]``
* ``sample_data[<sample>]["unfiltered_bam"]``
* ``sample_data[<sample>]["unsorted_bam"]``
* ``sample_data[<sample>]["bam.flagstat"]``
* ``sample_data[<sample>]["bam.stats"]``
* ``sample_data[<sample>]["bam.idxstats"]``
* If ``fastq`` was called, will also create the following files:
* ``self.sample_data[<sample>]["fastq.F"]``
* ``self.sample_data[<sample>]["fastq.R"]``
* ``self.sample_data[<sample>]["fastq.S"]``
* If ``fasta`` was called, will also create the following files:
* ``self.sample_data[<sample>]["fasta.F"]``
* ``self.sample_data[<sample>]["fasta.R"]``
* ``self.sample_data[<sample>]["fasta.S"]``
.. Note:: If ``scope`` is set to ``project``, the above mentioned output files will be created in the project
    scope, e.g. ``sample_data["project_data"]["bam.stats"]``.
.. Note:: If ``merge`` is included, ``scope`` must be ``sample`` and the merged *bam* is located in ``sample_data["project_data"]["bam"]``.
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table:: Parameters that can be set:
:header: "Parameter", "Values", "Comments"
"project", "sample|project", "Scope of SAM/BAM top operate on. Defaults to ``sample``."
"view", "*e.g.*: -buh -q 30", "``samtools view`` parameters."
"sort", "*e.g.*: -@ 20", "``samtools sort`` parameters."
"index", "", "``samtools index`` parameters."
"flagstat", "", "Leave empty. flagstat takes no parameters"
"stats", "``samtools stats`` parameters", "Adds code for ``samtools stats``"
"idxstats", "", "Adds code for ``samtools idxstats``"
"fastq/a", "``samtools fastq/a`` parameters", "Adds code for ``samtools fastq/a``"
"merge", "``*e.g.*: -R region``", "Adds code for ``samtools merge``, using the parameters supplied"
"region", "", "A region to limit the ``view`` script to."
"filter_by_tag", "*e.g.*: NM:i:[01]", "Filter BAM by one of the tags. Use an awk-compliant regular expression. In this example, keep only lines where the edit distance is 0 or 1. This is an experimental feature and should be used with caution..."
"del_sam", "", "Remove SAM file"
"del_unsorted", "", "Remove unsorted bam file."
"type2use","sam|bam","Type of file to use. Must exist in scope"
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
sam_bwt1:
        module: samtools_old
base: bwt1
script_path: /path/to/samtools/bin/samtools
qsub_params:
-pe: shared 20
view: -buh -q 30 -@ 20 -F 4
sort: -@ 20
flagstat:
index:
stats: --remove-dups
del_sam:
del_unsorted:
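A minimal project-scope variant (a sketch: the ``base`` step name is hypothetical,
and a project-scope BAM must already exist upstream)::

    sam_stats_project:
        module: samtools_old
        base: sam_merge
        script_path: /path/to/samtools/bin/samtools
        scope: project
        index:
        stats: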
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., Marth, G., Abecasis, G. and Durbin, R., 2009. **The sequence alignment/map format and SAMtools**. *Bioinformatics*, 25(16), pp.2078-2079.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
class Step_samtools_old(Step):
def step_specific_init(self):
self.shell = "bash" # Can be set to "bash" by inheriting instances
if "type2use" in self.params:
if not isinstance(self.params["type2use"],str) or self.params["type2use"] not in ["sam","bam"]:
raise AssertionExcept("'type2use' must be either 'sam' or 'bam'")
# Setting default scope to sample
if "scope" not in self.params:
self.params["scope"] = "sample"
for prog in "view sort index flagstat stats idxstats fastq fasta merge".split(" "):
if prog in self.params and self.params[prog] is None:
self.params[prog] = ""
def step_sample_initiation(self):
""" A place to do initiation stages following setting of sample_data
"""
# Set list of samples to go over. Either self.sample_data["samples"] for sample scope
# or ["project_data"] for project scope
if self.params["scope"] == "project":
sample_list = ["project_data"]
if "merge" in list(self.params.keys()):
raise AssertionExcept("project scope not defined for samtools merge")
elif self.params["scope"] == "sample":
sample_list = self.sample_data["samples"]
else:
raise AssertionExcept("'scope' must be either 'sample' or 'project'")
for sample in sample_list: # Getting list of samples out of samples_hash
# Check that a sam or bam exists
if "bam" in self.sample_data[sample] and "sam" in self.sample_data[sample]:
if "type2use" in self.params:
self.file2use = self.params["type2use"]
else:
raise AssertionExcept(
"Both BAM and SAM file types exist. Specify which one to use with 'type2use'.\n",
sample)
elif "bam" in self.sample_data[sample]:
self.file2use = "bam"
elif "sam" in self.sample_data[sample]:
self.file2use = "sam"
else:
raise AssertionExcept("Neither BAM nor SAM file exist.\n", sample)
if "type2use" in self.params:
if self.params["type2use"] not in self.sample_data[sample]:
raise AssertionExcept("No file of type '{type}' exists.".format(type=self.params["type2use"]),
sample)
def create_spec_wrapping_up_script(self):
""" Add stuff to check and agglomerate the output data
"""
# -------------- samtools merge ------------------
if "merge" in list(self.params.keys()):
sample_dir = self.make_folder_for_sample()
# Name of specific script:
self.spec_script_name = self.set_spec_script_name()
self.script = ""
# This line should be left before every new script. It sees to local issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(sample_dir)
outfile = self.sample_data["Title"] + ".merged.bam"
self.script += """\
###########
# Running samtools merge
#----------------
{env_path} merge \\{params}
\t{outfile} \\
\t{infiles}
""".format(env_path=self.get_script_env_path(),
infiles=" \\\n\t".join([self.sample_data[sample]["bam"] for sample in self.sample_data["samples"]]),
params="" if not self.params["merge"] else "\n\t" + self.params["merge"] + " \\",
outfile=use_dir + outfile)
self.sample_data["project_data"]["bam"] = sample_dir + outfile
self.stamp_file(self.sample_data["project_data"]["bam"])
def build_scripts(self):
""" This is the actual script building function
"""
if self.params["scope"] == "project":
sample_list = ["project_data"]
elif self.params["scope"] == "sample":
sample_list = self.sample_data["samples"]
else:
raise AssertionExcept("'scope' must be either 'sample' or 'project'")
for sample in sample_list: # Getting list of samples out of samples_hash
# Make a dir for the current sample:
sample_dir = self.make_folder_for_sample(sample)
# Name of specific script:
self.spec_script_name = self.set_spec_script_name(sample)
self.script = ""
# This line should be left before every new script. It sees to local issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(sample_dir)
active_file = self.sample_data[sample][self.file2use]
filter_suffix = ".filt"
sort_suffix = ".srt"
index_suffix = ".bai"
if "view" in self.params:
output_type = "bam" if re.search("\-\w*b", self.params["view"]) else "sam"
outfile = ".".join([os.path.basename(active_file), output_type])
self.script += """\
###########
# Running samtools view
#----------------
{env_path} view \\{params}
\t-o {outfile} \\
\t{active_file} {region}
""".format(env_path=self.get_script_env_path(),
active_file=active_file,
params="" if not self.params["view"] else "\n\t" + self.params["view"] + " \\",
region="" if not "region" in self.params else "\\\n\t" + self.params["region"],
outfile=use_dir + outfile)
active_file = use_dir + outfile
self.sample_data[sample][output_type] = sample_dir + outfile
self.stamp_file(self.sample_data[sample][output_type])
# If target of view is sam, terminating script. All others work on bam only.
if output_type == "sam":
self.write_warning("""
Output from samtools view is SAM. Not proceeding further.
To produce a BAM, make sure to include the -b flag in the samtools view parameters.""")
# If sam output, can't proceed with rest of commands which require bam input_file:
# Move all files from temporary local dir to permanent base_dir
self.local_finish(use_dir, sample_dir)
self.create_low_level_script()
continue
else:
# view not passed
# If source is SAM, terminate with error
if self.file2use == "sam":
raise AssertionExcept("Source file is 'sam', you must include 'view' in your oprations")
# else, create local link to BAM and set active file accordingly
self.script += """\
##########
# Making local link to original bam file: (-f to force)
#----------
cp -fs {active_file} {here}
""".format(active_file=active_file,
here=use_dir)
active_file = use_dir + os.path.basename(active_file)
self.sample_data[sample]["bam"] = sample_dir + os.path.basename(active_file)
# The following can be merged into the main 'view' section
if "filter_by_tag" in list(self.params.keys()):
# outfile = os.path.basename(active_file) + filter_suffix
outfile = filter_suffix.join(os.path.splitext(os.path.basename(active_file)))
self.script += """\
###########
# Filtering BAM
#----------------
{env_path} view \\
\t-h \\
\t{active_file} | \\
\tawk '$0 ~\"(^@)|({query})\"' | \\
\t{env_path} view \\
\t-bh \\
\t-o {outfile} \\
\t-
{rm_unfilt}
""".format(env_path=self.get_script_env_path(),
active_file=active_file,
query=self.params["filter_by_tag"],
outfile=use_dir+outfile,
rm_unfilt="# Removing unfiltered BAM\nrm -rf "+active_file if "del_unfiltered" in list(self.params.keys()) else "")
# Storing filtered and unfiltered bams:
self.sample_data[sample]["unfiltered_bam"] = active_file
self.sample_data[sample]["bam"] = sample_dir + outfile
self.stamp_file(self.sample_data[sample]["bam"])
active_file = use_dir + outfile
if "sort" in list(self.params.keys()):
if "bam" not in self.sample_data[sample]:
raise AssertionExcept("Can't run 'sort', as no BAM is defined", sample)
# outfile = os.path.basename(active_file) + sort_suffix
outfile = sort_suffix.join(os.path.splitext(os.path.basename(active_file)))
self.script += """\
###########
# Sorting BAM
#----------------
{env_path}sort \\{params}
\t-o {outf} \\
\t{active_file}
{rm_unsort}
""".format(env_path=self.get_script_env_path(),
params="" if not self.params["sort"] else "\n\t"+self.params["sort"]+" \\",
outf=(use_dir + outfile),
active_file=active_file,
rm_unsort="# Removing unsorted BAM\nrm -rf "+active_file if "del_unsorted" in list(self.params.keys()) else "")
# Storing sorted bam in 'bam' slot and unsorted bam in unsorted_bam slot
self.sample_data[sample]["unsorted_bam"] = active_file
self.sample_data[sample]["bam"] = sample_dir + outfile
self.stamp_file(self.sample_data[sample]["bam"])
active_file = use_dir + outfile
if "index" in list(self.params.keys()):
self.script += """\
###########
# Indexing BAM
#----------------
{env_path}index \\{params}
\t{active_file}
""".format(env_path=self.get_script_env_path(),
params="" if not self.params["index"] else "\n\t" + self.params["index"] + " \\",
active_file=active_file)
self.sample_data[sample]["bai"] = sample_dir + os.path.basename(active_file) + index_suffix
self.stamp_file(self.sample_data[sample]["bai"])
for comm in ["flagstat","stats","idxstats"]:
if comm in list(self.params.keys()):
outfile = ".".join([os.path.basename(active_file), comm])
self.script += """\
###########
# Calculating {comm}
#----------------
{env_path}{comm} \\{params}
\t{active_file} \\
\t> {outfile}
""".format(env_path=self.get_script_env_path(),
params="" if not self.params[comm] else "\n\t" + self.params[comm] + " \\",
active_file=active_file,
comm=comm,
outfile=use_dir+outfile)
self.sample_data[sample]["bam."+comm] = sample_dir + outfile
self.stamp_file(self.sample_data[sample]["bam."+comm])
# Adding code for fastq or fasta extraction from bam:
for type in (set(self.params.keys()) & set(["fasta","fastq"])):
if "fastq.F" in self.sample_data[sample]:
readspart = """\
-1 {readsF} \\
\t-2 {readsR} \
""".format(readsF=(active_file + ".F." + type),
readsR=(active_file + ".R." + type))
else:
readspart = """\
-0 {readsS} \
""".format(readsS=(active_file + ".S." + type))
# -0 and mixed paired-single not supported yet
self.script += """\
###########
# Extracting fastq files from BAM:
#----------------
{env_path}{type} \\{params}
\t{readspart} \\
\t{active_file}
""".format(env_path=self.get_script_env_path(),
params="" if not self.params[type] else "\n\t" + self.params[type] + " \\",
readspart=readspart,
type=type,
active_file=active_file)
# Storing and Stamping files
if "fastq.F" in self.sample_data[sample]:
self.sample_data[sample][type+".F"] = "%s%s.F.%s" % (sample_dir, os.path.basename(active_file), type)
self.sample_data[sample][type+".R"] = "%s%s.R.%s" % (sample_dir, os.path.basename(active_file), type)
self.stamp_file(self.sample_data[sample][type+".F"])
self.stamp_file(self.sample_data[sample][type+".R"])
else:
self.sample_data[sample][type+".S"] = "%s%s.S.%s" % (sample_dir, os.path.basename(active_file), type)
self.stamp_file(self.sample_data[sample][type+".S"])
if "del_sam" in list(self.params.keys()) and "sam" in self.sample_data[sample]:
self.script += """\
###########
# Removing SAM
#----------------
rm -rf {sam}
""".format(sam=self.sample_data[sample]["sam"])
self.local_finish(use_dir,sample_dir)
self.create_low_level_script()
| gpl-3.0 | 563,642,634,171,199,600 | 37.208791 | 250 | 0.557032 | false |
simplecrypto/deploy_helper | deploy/deploy.py | 1 | 9542 | #!/usr/bin/env python
import argparse
import time
import datetime
import os
import json
import sys
import subprocess
import logging
from pprint import pformat
class Deploy(object):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def __init__(self, **kwargs):
# Defaults
self.__dict__.update(colorize=True,
basedir="",
tag=False,
executable=None,
config="deploy.json",
venv_dir=None,
extra_requirements=[],
cachedir="pipcache",
wheeldir="wheelhouse",
repo_dir="repo",
package=None,
rev="HEAD",
venv_name_command="git --git-dir {repo_dir}/.git --no-pager log -1 --format='%f' {rev}",
)
# Override from kwargs
fail = bool(kwargs.get('config') != self.config)
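        # Treat a failure to load the config file as fatal only when the
        # caller explicitly asked for a non-default config path.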
self.__dict__.update(kwargs)
try:
self.__dict__.update(json.load(open(self.config)))
except Exception as e:
if fail:
self.fail("Failed to load config file {}".format(self.config), 5, exception=e)
self.warn("Failed to load config file {}".format(self.config))
# Finally, kwargs take priority over config, so apply again
self.__dict__.update(kwargs)
# ---- Process configurations
        # For each of the other directories, join to basedir if a non-abs
# path is specified
if not os.path.isabs(self.cachedir):
self.cachedir = os.path.realpath(os.path.join(self.basedir, self.cachedir))
if not os.path.isabs(self.wheeldir):
self.wheeldir = os.path.realpath(os.path.join(self.basedir, self.wheeldir))
if not os.path.isabs(self.repo_dir):
self.repo_dir = os.path.realpath(os.path.join(self.basedir, self.repo_dir))
if self.package is None:
self.fail("A package name must be provided!")
if self.executable is None:
self.fail("An executable name is required")
# ---- Setup base information
self.basedir = os.path.realpath(self.basedir)
self.logger.debug("Deploy config:\n{}".format(pformat(self.__dict__)))
def color(self, string, color):
""" Colorize an output string if coloring is enabled """
if self.colorize:
return color + string + self.ENDC
else:
return string
def req(self, command_string):
""" Require the call to succeed, or exit """
ret = self.system(command_string)
if ret:
self.fail("Command {} exited with return code {}"
.format(command_string, ret), ret * -1)
def warn(self, message):
self.logger.warn(self.color("!! {}".format(message), self.HEADER))
    def fail(self, message, code=5, exception=None):
        # Log the underlying exception, if any, before bailing out
        if exception is not None:
            self.logger.error(self.color("!! {}".format(exception), self.FAIL))
        self.logger.error(self.color("#### ERROR: {} ####".format(message), self.WARNING))
        exit(code)
def system(self, call, output=False):
""" Simple wrapper for os.system that prints the command that's being
executed """
self.logger.info(self.color("-- {}".format(call), self.OKBLUE))
if output:
try:
return subprocess.check_output(call, shell=True).strip()
except subprocess.CalledProcessError as e:
self.fail("Command {} exited with return code {}"
.format(call, e.returncode), e.returncode * -1)
return os.system(call)
def try_pip(self, pip_fragment):
""" Runs a pip fragment which will first try to install from the
wheelhouse. If it fails to install everything from the wheelhouse it
compiles wheels and then runs again. """
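        # --no-index restricts pip to the local wheelhouse given via
        # --find-links; note that --use-wheel and --download-cache are
        # flags of older pip releases and have since been removed from
        # modern pip.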
pip_inst = ("{}/bin/pip install --no-index --use-wheel --find-links='{}'"
" --download-cache='{}' {}"
.format(self.venv_dir, self.wheeldir, self.cachedir, pip_fragment))
# If error (non zero ret), assume that there weren't valid wheels
if self.system(pip_inst):
self.req("{}/bin/pip wheel --download-cache='{}' --wheel-dir='{}' {}"
.format(self.venv_dir, self.cachedir, self.wheeldir, pip_fragment))
# Try to install now with valid wheels
self.req(pip_inst)
def current_rev(self):
self.githash = self.system(
"git --git-dir {repo_dir}/.git rev-parse {rev}"
.format(**self.__dict__), output=True)
assert len(self.githash) == 40
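        # Venv name: the slugified commit subject (git's %f placeholder),
        # truncated to 30 characters, plus the full sha, so each deployed
        # revision gets its own virtualenv directory.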
venv_name = self.system(self.venv_name_command.format(**self.__dict__), output=True)[:30].rstrip("-")
venv_name += "-{}".format(self.githash)
self.logger.info("Parsed githash for repository {}".format(self.githash))
# If not provided, use basedir
if self.venv_dir is None:
self.venv_dir = os.path.join(self.basedir, venv_name)
# If provided but relative, join to basedir
elif not os.path.isabs(self.venv_dir):
self.venv_dir = os.path.realpath(os.path.join(self.basedir, self.venv_dir, venv_name))
# If absolute, join to given abs path
else:
self.venv_dir = os.path.join(self.venv_dir, venv_name)
def create(self):
""" Creates a virtualenv for a specific python package """
self.current_rev()
if os.path.isdir(self.venv_dir):
self.fail("venv dir {} already exists! Aborting.".format(self.venv_dir))
self.logger.info(self.color("Marking sha hash in repository", self.HEADER))
repo = os.path.join(self.repo_dir, self.package)
if self.tag:
self.req(r'echo "__sha__ = \"{}\"" >> {}/__init__.py'.format(self.githash, repo))
self.req("virtualenv {}".format(self.venv_dir))
self.req("{}/bin/pip install wheel".format(self.venv_dir))
self.try_pip("-r {}".format(os.path.join(self.repo_dir, "requirements.txt")))
for extra in self.extra_requirements:
self.try_pip("-r {}".format(extra))
self.req("{}/bin/pip install {}".format(self.venv_dir, self.repo_dir))
if self.tag:
self.req("git --git-dir {0}/.git --work-tree {0} checkout -- {1}/__init__.py"
.format(self.repo_dir, self.package))
self.logger.info(self.color("#### SUCCESS ####", self.OKGREEN))
def is_venv(self, folder):
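        # Heuristic: a directory is treated as a virtualenv if it contains
        # the standard virtualenv subdirectories.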
dirs = set(os.walk(folder).next()[1])
return set(["bin", "include", "lib", "local"]).issubset(dirs)
    def num_links(self, f):
        """Number of hard links to the given file, as reported by stat(1)."""
        return int(self.system("stat -c '%h' {}".format(f), output=True))
def find_venvs(self):
for folder in os.walk(self.basedir).next()[1]:
if self.is_venv(folder):
seconds_since_create = int(time.time() - os.stat(folder).st_ctime)
age = datetime.timedelta(seconds=seconds_since_create)
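                # st_nlink includes the venv's own copy of the binary, so
                # subtract one to count only the deploy hardlinks.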
links = os.stat(os.path.join(folder, "bin", self.executable)).st_nlink - 1
yield age, links, folder
def clean_venvs(self):
for age, links, folder in self.find_venvs():
if links == 0:
print("Found venv {}, age {}, with no links to the binary."
.format(folder, age))
if raw_input("Would you like to delete it? [y/n]") == "y":
self.req("rm -rf {}".format(folder))
else:
print("Found venv {}, age {}, with {} links. Ignoring."
.format(folder, age, links))
def list_venvs(self):
print("Age\tLinks\tName")
for vals in self.find_venvs():
print("{}\t{}\t{}".format(*vals))
def link(self, rev=None):
if rev is None:
self.current_rev()
for name in self.names:
self.logger.info("Linking {} to {}".format(name, self.venv_dir))
self.req("ln --no-dereference -f {}/bin/{} {}"
.format(self.venv_dir, self.executable, name))
def main():
parser = argparse.ArgumentParser(prog='venv deploy')
parser.add_argument('-l', '--log-level', default="INFO",
choices=['DEBUG', 'INFO', 'WARN', 'ERROR'])
parser.add_argument('-c', '--config', default="deploy.json")
subparsers = parser.add_subparsers(title='main subcommands', dest='action')
subparsers.add_parser('list_venvs')
subparsers.add_parser('clean_venvs')
create = subparsers.add_parser('create')
create.add_argument('-r', '--rev', default="HEAD")
link = subparsers.add_parser('link', help='links a list of executable names to the current git revision')
link.add_argument('names', help='names of the hardlinks you\'d like to create', action='append')
link.add_argument('-r', '--rev', default="HEAD")
args = parser.parse_args()
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s'))
root = logging.getLogger()
root.setLevel(getattr(logging, args.log_level))
root.addHandler(handler)
logger = logging.getLogger("deploy")
dep = Deploy(logger=logger, **vars(args))
getattr(dep, args.action)()
if __name__ == "__main__":
main()
| isc | -5,027,464,528,203,097,000 | 40.307359 | 117 | 0.563089 | false |
noironetworks/heat | heat/common/param_utils.py | 3 | 2815 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from heat.common.i18n import _
def extract_bool(name, value):
"""Convert any true/false string to its corresponding boolean value.
Value is case insensitive.
"""
if str(value).lower() not in ('true', 'false'):
raise ValueError(_('Unrecognized value "%(value)s" for "%(name)s", '
'acceptable values are: true, false.')
% {'value': value, 'name': name})
return strutils.bool_from_string(value, strict=True)
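# For example, extract_bool('enabled', 'TRUE') returns True, while
# extract_bool('enabled', 'yes') raises ValueError: only the literal
# strings 'true'/'false' (any case) pass the guard above.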
def delim_string_to_list(value):
    """Split a comma-delimited string into a list.

    Returns None for None input and an empty list for an empty string.
    """
if value is None:
return None
if value == '':
return []
return value.split(',')
def extract_int(name, value, allow_zero=True, allow_negative=False):
if value is None:
return None
if not strutils.is_int_like(value):
raise ValueError(_("Only integer is acceptable by "
"'%(name)s'.") % {'name': name})
if value in ('0', 0):
if allow_zero:
return int(value)
raise ValueError(_("Only non-zero integer is acceptable by "
"'%(name)s'.") % {'name': name})
try:
result = int(value)
except (TypeError, ValueError):
raise ValueError(_("Value '%(value)s' is invalid for '%(name)s' "
"which only accepts integer.") %
{'name': name, 'value': value})
if allow_negative is False and result < 0:
raise ValueError(_("Value '%(value)s' is invalid for '%(name)s' "
"which only accepts non-negative integer.") %
{'name': name, 'value': value})
return result
def extract_tags(subject):
tags = subject.split(',')
for tag in tags:
if len(tag) > 80:
raise ValueError(_('Invalid tag, "%s" is longer than 80 '
'characters') % tag)
return tags
def extract_template_type(subject):
template_type = subject.lower()
if template_type not in ('cfn', 'hot'):
raise ValueError(_('Invalid template type "%(value)s", valid '
'types are: cfn, hot.') %
{'value': subject})
return template_type
| apache-2.0 | 1,304,535,237,187,406,600 | 32.511905 | 78 | 0.573357 | false |
R4stl1n/allianceauth | allianceauth/services/modules/phpbb3/migrations/0002_service_permissions.py | 3 | 2133 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 05:59
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.management import create_permissions
import logging
logger = logging.getLogger(__name__)
def migrate_service_enabled(apps, schema_editor):
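    # Permissions are normally created by Django's post_migrate signal,
    # which has not fired yet at this point, so create them explicitly to
    # guarantee that the 'access_phpbb3' permission looked up below exists.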
for app_config in apps.get_app_configs():
app_config.models_module = True
create_permissions(app_config, apps=apps, verbosity=0)
app_config.models_module = None
Group = apps.get_model("auth", "Group")
Permission = apps.get_model("auth", "Permission")
Phpbb3User = apps.get_model("phpbb3", "Phpbb3User")
perm = Permission.objects.get(codename='access_phpbb3')
member_group_name = getattr(settings, str('DEFAULT_AUTH_GROUP'), 'Member')
blue_group_name = getattr(settings, str('DEFAULT_BLUE_GROUP'), 'Blue')
# Migrate members
if Phpbb3User.objects.filter(user__groups__name=member_group_name).exists() or \
getattr(settings, str('ENABLE_AUTH_FORUM'), False):
try:
group = Group.objects.get(name=member_group_name)
group.permissions.add(perm)
except ObjectDoesNotExist:
logger.warning('Failed to migrate ENABLE_AUTH_FORUM setting')
# Migrate blues
if Phpbb3User.objects.filter(user__groups__name=blue_group_name).exists() or \
getattr(settings, str('ENABLE_BLUE_FORUM'), False):
try:
group = Group.objects.get(name=blue_group_name)
group.permissions.add(perm)
except ObjectDoesNotExist:
logger.warning('Failed to migrate ENABLE_BLUE_FORUM setting')
class Migration(migrations.Migration):
dependencies = [
('phpbb3', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='phpbb3user',
options={'permissions': (('access_phpbb3', 'Can access the phpBB3 service'),)},
),
migrations.RunPython(migrate_service_enabled, migrations.RunPython.noop),
]
| gpl-2.0 | 7,692,667,226,943,113,000 | 33.967213 | 91 | 0.66526 | false |
gmr/tredis | tredis/strings.py | 1 | 28551 | """Redis String Commands Mixin"""
# Python 2 support for ascii()
if 'ascii' not in dir(__builtins__): # pragma: nocover
from tredis.compat import ascii
BITOP_AND = b'&'
"""Use for specifying a bitwise AND operation with
:meth:`~tredis.RedisClient.bitop`"""
BITOP_OR = b'|'
"""Use for specifying a bitwise OR operation with
:meth:`~tredis.RedisClient.bitop`"""
BITOP_XOR = b'^'
"""Use for specifying a bitwise XOR operation with
:meth:`~tredis.RedisClient.bitop`"""
BITOP_NOT = b'~'
"""Use for specifying a bitwise NOT operation with
:meth:`~tredis.RedisClient.bitop`"""
_BITOPTS = {
BITOP_AND: b'AND',
BITOP_OR: b'OR',
BITOP_XOR: b'XOR',
BITOP_NOT: b'NOT',
}
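# Usage sketch (assumes a connected, Tornado-based RedisClient inside a
# coroutine): ``result = yield client.bitop(BITOP_AND, 'dest', 'k1', 'k2')``
# stores the bitwise AND of 'k1' and 'k2' under 'dest'.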
class StringsMixin(object):
"""Redis String Commands Mixin"""
def append(self, key, value):
"""If key already exists and is a string, this command appends the
value at the end of the string. If key does not exist it is created and
set as an empty string, so :meth:`~tredis.RedisClient.append` will be
similar to :meth:`~tredis.RedisClient.set` in this special case.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``. The amortized time complexity
is ``O(1)`` assuming the appended value is small and the already
present value is of any size, since the dynamic string library used
by Redis will double the free space available on every reallocation.
:param key: The key to get
:type key: :class:`str`, :class:`bytes`
:param value: The value to append to the key
:type value: :class:`str`, :class:`bytes`
:returns: The length of the string after the append operation
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'APPEND', key, value])
def bitcount(self, key, start=None, end=None):
"""Count the number of set bits (population counting) in a string.
By default all the bytes contained in the string are examined. It is
possible to specify the counting operation only in an interval passing
the additional arguments start and end.
Like for the :meth:`~tredis.RedisClient.getrange` command start and
end can contain negative values in order to index bytes starting from
the end of the string, where ``-1`` is the last byte, ``-2`` is the
penultimate, and so forth.
Non-existent keys are treated as empty strings, so the command will
return zero.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param key: The key to get
:type key: :class:`str`, :class:`bytes`
:param int start: The start position to evaluate in the string
:param int end: The end position to evaluate in the string
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
"""
command = [b'BITCOUNT', key]
if start is not None and end is None:
raise ValueError('Can not specify start without an end')
elif start is None and end is not None:
            raise ValueError('Can not specify end without a start')
elif start is not None and end is not None:
command += [ascii(start), ascii(end)]
return self._execute(command)
def bitop(self, operation, dest_key, *keys):
"""Perform a bitwise operation between multiple keys (containing
string values) and store the result in the destination key.
The values for operation can be one of:
- ``b'AND'``
- ``b'OR'``
- ``b'XOR'``
- ``b'NOT'``
- :data:`tredis.BITOP_AND` or ``b'&'``
- :data:`tredis.BITOP_OR` or ``b'|'``
- :data:`tredis.BITOP_XOR` or ``b'^'``
- :data:`tredis.BITOP_NOT` or ``b'~'``
``b'NOT'`` is special as it only takes an input key, because it
performs inversion of bits so it only makes sense as an unary operator.
The result of the operation is always stored at ``dest_key``.
**Handling of strings with different lengths**
When an operation is performed between strings having different
lengths, all the strings shorter than the longest string in the set are
treated as if they were zero-padded up to the length of the longest
string.
The same holds true for non-existent keys, that are considered as a
stream of zero bytes up to the length of the longest string.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param bytes operation: The operation to perform
:param dest_key: The key to store the bitwise operation results to
:type dest_key: :class:`str`, :class:`bytes`
:param keys: One or more keys as keyword parameters for the bitwise op
:type keys: :class:`str`, :class:`bytes`
:return: The size of the string stored in the destination key, that is
equal to the size of the longest input string.
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
"""
if (operation not in _BITOPTS.keys()
and operation not in _BITOPTS.values()):
raise ValueError('Invalid operation value: {}'.format(operation))
elif operation in [b'~', b'NOT'] and len(keys) > 1:
raise ValueError('NOT can only be used with 1 key')
if operation in _BITOPTS.keys():
operation = _BITOPTS[operation]
return self._execute([b'BITOP', operation, dest_key] + list(keys))
def bitpos(self, key, bit, start=None, end=None):
"""Return the position of the first bit set to ``1`` or ``0`` in a
string.
The position is returned, thinking of the string as an array of bits
from left to right, where the first byte's most significant bit is at
position 0, the second byte's most significant bit is at position
``8``, and so forth.
The same bit position convention is followed by
:meth:`~tredis.RedisClient.getbit` and
:meth:`~tredis.RedisClient.setbit`.
By default, all the bytes contained in the string are examined. It is
possible to look for bits only in a specified interval passing the
additional arguments start and end (it is possible to just pass start,
the operation will assume that the end is the last byte of the string.
However there are semantic differences as explained later). The range
is interpreted as a range of bytes and not a range of bits, so
``start=0`` and ``end=2`` means to look at the first three bytes.
Note that bit positions are returned always as absolute values starting
from bit zero even when start and end are used to specify a range.
Like for the :meth:`~tredis.RedisClient.getrange` command start and
end can contain negative values in order to index bytes starting from
the end of the string, where ``-1`` is the last byte, ``-2`` is the
penultimate, and so forth.
Non-existent keys are treated as empty strings.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param key: The key to get
:type key: :class:`str`, :class:`bytes`
:param int bit: The bit value to search for (``1`` or ``0``)
:param int start: The start position to evaluate in the string
:param int end: The end position to evaluate in the string
:returns: The position of the first bit set to ``1`` or ``0``
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
"""
        if bit not in (0, 1):
            raise ValueError('bit must be 1 or 0, not {}'.format(bit))
command = [b'BITPOS', key, ascii(bit)]
if start is not None and end is None:
raise ValueError('Can not specify start without an end')
elif start is None and end is not None:
            raise ValueError('Can not specify end without a start')
elif start is not None and end is not None:
command += [ascii(start), ascii(end)]
return self._execute(command)
def decr(self, key):
"""Decrements the number stored at key by one. If the key does not
exist, it is set to 0 before performing the operation. An error is
returned if the key contains a value of the wrong type or contains a
string that can not be represented as integer. This operation is
limited to 64 bit signed integers.
See :meth:`~tredis.RedisClient.incr` for extra information on
increment/decrement operations.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to decrement
:type key: :class:`str`, :class:`bytes`
:returns: The value of key after the decrement
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DECR', key])
def decrby(self, key, decrement):
"""Decrements the number stored at key by decrement. If the key does
not exist, it is set to 0 before performing the operation. An error
is returned if the key contains a value of the wrong type or contains
a string that can not be represented as integer. This operation is
limited to 64 bit signed integers.
See :meth:`~tredis.RedisClient.incr` for extra information on
increment/decrement operations.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to decrement
:type key: :class:`str`, :class:`bytes`
:param int decrement: The amount to decrement by
:returns: The value of key after the decrement
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DECRBY', key, ascii(decrement)])
def get(self, key):
"""Get the value of key. If the key does not exist the special value
:data:`None` is returned. An error is returned if the value stored
at key is not a string, because :meth:`~tredis.RedisClient.get` only
handles string values.
.. note:: **Time complexity**: ``O(1)``
:param key: The key to get
:type key: :class:`str`, :class:`bytes`
:rtype: bytes|None
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GET', key])
def getbit(self, key, offset):
"""Returns the bit value at offset in the string value stored at key.
When offset is beyond the string length, the string is assumed to be a
contiguous space with 0 bits. When key does not exist it is assumed to
be an empty string, so offset is always out of range and the value is
also assumed to be a contiguous space with 0 bits.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to get the bit from
:type key: :class:`str`, :class:`bytes`
:param int offset: The bit offset to fetch the bit from
        :rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GETBIT', key, ascii(offset)])
def getrange(self, key, start, end):
"""Returns the bit value at offset in the string value stored at key.
When offset is beyond the string length, the string is assumed to be a
contiguous space with 0 bits. When key does not exist it is assumed to
be an empty string, so offset is always out of range and the value is
also assumed to be a contiguous space with 0 bits.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the length of
the returned string. The complexity is ultimately determined by the
returned length, but because creating a substring from an existing
string is very cheap, it can be considered ``O(1)`` for small
strings.
        :param key: The key to get the substring from
        :type key: :class:`str`, :class:`bytes`
        :param int start: The start position to evaluate in the string
        :param int end: The end position to evaluate in the string
        :rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GETRANGE', key, ascii(start), ascii(end)])
def getset(self, key, value):
"""Atomically sets key to value and returns the old value stored at
key. Returns an error when key exists but does not hold a string value.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to remove
:type key: :class:`str`, :class:`bytes`
:param value: The value to set
:type value: :class:`str`, :class:`bytes`
:returns: The previous value
:rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GETSET', key, value])
def incr(self, key):
"""Increments the number stored at key by one. If the key does not
exist, it is set to ``0`` before performing the operation. An error is
returned if the key contains a value of the wrong type or contains a
string that can not be represented as integer. This operation is
limited to 64 bit signed integers.
.. note:: This is a string operation because Redis does not have a
dedicated integer type. The string stored at the key is interpreted
as a base-10 64 bit signed integer to execute the operation.
Redis stores integers in their integer representation, so for string
values that actually hold an integer, there is no overhead for storing
the string representation of the integer.
.. note:: **Time complexity**: ``O(1)``
:param key: The key to increment
:type key: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'INCR', key])
def incrby(self, key, increment):
"""Increments the number stored at key by increment. If the key does
not exist, it is set to 0 before performing the operation. An error is
returned if the key contains a value of the wrong type or contains a
string that can not be represented as integer. This operation is
limited to 64 bit signed integers.
See :meth:`~tredis.RedisClient.incr` for extra information on
increment/decrement operations.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to increment
:type key: :class:`str`, :class:`bytes`
:param int increment: The amount to increment by
:returns: The value of key after the increment
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'INCRBY', key, ascii(increment)])
def incrbyfloat(self, key, increment):
"""Increment the string representing a floating point number stored at
key by the specified increment. If the key does not exist, it is set to
0 before performing the operation. An error is returned if one of the
following conditions occur:
- The key contains a value of the wrong type (not a string).
- The current key content or the specified increment are not
parsable as a double precision floating point number.
If the command is successful the new incremented value is stored as the
new value of the key (replacing the old one), and returned to the
caller as a string.
Both the value already contained in the string key and the increment
argument can be optionally provided in exponential notation, however
the value computed after the increment is stored consistently in the
same format, that is, an integer number followed (if needed) by a dot,
and a variable number of digits representing the decimal part of the
number. Trailing zeroes are always removed.
The precision of the output is fixed at 17 digits after the decimal
point regardless of the actual internal precision of the computation.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to increment
:type key: :class:`str`, :class:`bytes`
:param float increment: The amount to increment by
:returns: The value of key after the increment
:rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'INCRBYFLOAT', key, ascii(increment)])
def mget(self, *keys):
"""Returns the values of all specified keys. For every key that does
not hold a string value or does not exist, the special value nil is
returned. Because of this, the operation never fails.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
keys to retrieve.
:param keys: One or more keys as keyword arguments to the function
:type keys: :class:`str`, :class:`bytes`
:rtype: list
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'MGET'] + list(keys))
def mset(self, mapping):
"""Sets the given keys to their respective values.
:meth:`~tredis.RedisClient.mset` replaces existing values with new
values, just as regular :meth:`~tredis.RedisClient.set`. See
:meth:`~tredis.RedisClient.msetnx` if you don't want to overwrite
existing values.
:meth:`~tredis.RedisClient.mset` is atomic, so all given keys are set
at once. It is not possible for clients to see that some of the keys
were updated while others are unchanged.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
keys to set.
:param dict mapping: A mapping of key/value pairs to set
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [b'MSET']
for key, value in mapping.items():
command += [key, value]
return self._execute(command, b'OK')
def msetnx(self, mapping):
"""Sets the given keys to their respective values.
:meth:`~tredis.RedisClient.msetnx` will not perform any operation at
all even if just a single key already exists.
Because of this semantic :meth:`~tredis.RedisClient.msetnx` can be used
in order to set different keys representing different fields of an
unique logic object in a way that ensures that either all the fields or
none at all are set.
:meth:`~tredis.RedisClient.msetnx` is atomic, so all given keys are set
at once. It is not possible for clients to see that some of the keys
were updated while others are unchanged.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
keys to set.
:param dict mapping: A mapping of key/value pairs to set
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [b'MSETNX']
for key, value in mapping.items():
command += [key, value]
return self._execute(command, 1)
def psetex(self, key, milliseconds, value):
""":meth:`~tredis.RedisClient.psetex` works exactly like
        :meth:`~tredis.RedisClient.setex` with the sole difference that the
expire time is specified in milliseconds instead of seconds.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to set
:type key: :class:`str`, :class:`bytes`
:param int milliseconds: Number of milliseconds for TTL
:param value: The value to set
:type value: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute(
[b'PSETEX', key, ascii(milliseconds), value], b'OK')
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
"""Set key to hold the string value. If key already holds a value, it
is overwritten, regardless of its type. Any previous time to live
associated with the key is discarded on successful
:meth:`~tredis.RedisClient.set` operation.
If the value is not one of :class:`str`, :class:`bytes`, or
:class:`int`, a :exc:`ValueError` will be raised.
.. note:: **Time complexity**: ``O(1)``
:param key: The key to remove
:type key: :class:`str`, :class:`bytes`
:param value: The value to set
:type value: :class:`str`, :class:`bytes`, :class:`int`
:param int ex: Set the specified expire time, in seconds
:param int px: Set the specified expire time, in milliseconds
:param bool nx: Only set the key if it does not already exist
:param bool xx: Only set the key if it already exist
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
:raises: :exc:`ValueError`
"""
command = [b'SET', key, value]
if ex:
command += [b'EX', ascii(ex).encode('ascii')]
if px:
command += [b'PX', ascii(px).encode('ascii')]
if nx:
command.append(b'NX')
if xx:
command.append(b'XX')
return self._execute(command, b'OK')
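    # Usage sketch (assumes a connected client inside a Tornado coroutine):
    #     stored = yield client.set('session', token, ex=3600, nx=True)
    # sets the key only if it does not already exist, with a one hour TTL.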
def setbit(self, key, offset, bit):
"""Sets or clears the bit at offset in the string value stored at key.
The bit is either set or cleared depending on value, which can be
either 0 or 1. When key does not exist, a new string value is created.
The string is grown to make sure it can hold a bit at offset. The
offset argument is required to be greater than or equal to 0, and
smaller than 2 :sup:`32` (this limits bitmaps to 512MB). When the
string at key is grown, added bits are set to 0.
.. warning:: When setting the last possible bit (offset equal to
2 :sup:`32` -1) and the string value stored at key does not yet hold
a string value, or holds a small string value, Redis needs to
allocate all intermediate memory which can block the server for some
time. On a 2010 MacBook Pro, setting bit number 2 :sup:`32` -1
(512MB allocation) takes ~300ms, setting bit number 2 :sup:`30` -1
(128MB allocation) takes ~80ms, setting bit number 2 :sup:`28` -1
(32MB allocation) takes ~30ms and setting bit number 2 :sup:`26` -1
(8MB allocation) takes ~8ms. Note that once this first allocation is
done, subsequent calls to :meth:`~tredis.RedisClient.setbit` for the
same key will not have the allocation overhead.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
        :param key: The key to set the bit on
:type key: :class:`str`, :class:`bytes`
:param int offset: The bit offset to fetch the bit from
:param int bit: The value (``0`` or ``1``) to set for the bit
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
        if bit not in (0, 1):
            raise ValueError('bit must be 1 or 0, not {}'.format(bit))
return self._execute([b'SETBIT', key, ascii(offset), ascii(bit)])
def setex(self, key, seconds, value):
"""Set key to hold the string value and set key to timeout after a
given number of seconds.
:meth:`~tredis.RedisClient.setex` is atomic, and can be reproduced by
using :meth:`~tredis.RedisClient.set` and
:meth:`~tredis.RedisClient.expire` inside an
:meth:`~tredis.RedisClient.multi` /
:meth:`~tredis.RedisClient.exec` block. It is provided as a faster
alternative to the given sequence of operations, because this operation
is very common when Redis is used as a cache.
An error is returned when seconds is invalid.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to set
:type key: :class:`str`, :class:`bytes`
:param int seconds: Number of seconds for TTL
:param value: The value to set
:type value: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'SETEX', key, ascii(seconds), value], b'OK')
def setnx(self, key, value):
"""Set key to hold string value if key does not exist. In that case, it
is equal to :meth:`~tredis.RedisClient.setnx`. When key already holds a
value, no operation is performed. :meth:`~tredis.RedisClient.setnx` is
short for "SET if Not eXists".
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to set
:type key: :class:`str`, :class:`bytes`
:param value: The value to set
:type value: :class:`str`, :class:`bytes`, :class:`int`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'SETNX', key, value], 1)
def setrange(self, key, offset, value):
"""Overwrites part of the string stored at key, starting at the
specified offset, for the entire length of value. If the offset is
larger than the current length of the string at key, the string is
padded with zero-bytes to make offset fit. Non-existing keys are
considered as empty strings, so this command will make sure it holds a
string large enough to be able to set value at offset.
.. note:: The maximum offset that you can set is 2 :sup:`29` -1
(536870911), as Redis Strings are limited to 512 megabytes. If you
need to grow beyond this size, you can use multiple keys.
.. warning:: When setting the last possible byte and the string value
stored at key does not yet hold a string value, or holds a small
string value, Redis needs to allocate all intermediate memory which
can block the server for some time. On a 2010 MacBook Pro, setting
byte number 536870911 (512MB allocation) takes ~300ms, setting byte
number 134217728 (128MB allocation) takes ~80ms, setting bit number
33554432 (32MB allocation) takes ~30ms and setting bit number
8388608 (8MB allocation) takes ~8ms. Note that once this first
allocation is done, subsequent calls to
:meth:`~tredis.RedisClient.setrange` for the same key will not have
the allocation overhead.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``, not counting the time taken to
copy the new string in place. Usually, this string is very small so
the amortized complexity is ``O(1)``. Otherwise, complexity is
``O(M)`` with ``M`` being the length of the value argument.
        :param key: The key of the string to modify
        :type key: :class:`str`, :class:`bytes`
        :param int offset: The zero-based byte offset at which to start
            overwriting
        :param value: The value to set
        :type value: :class:`str`, :class:`bytes`, :class:`int`
:returns: The length of the string after it was modified by the command
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'SETRANGE', key, ascii(offset), value])
def strlen(self, key):
"""Returns the length of the string value stored at key. An error is
returned when key holds a non-string value
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to set
:type key: :class:`str`, :class:`bytes`
:returns: The length of the string at key, or 0 when key does not exist
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'STRLEN', key])
| bsd-3-clause | 3,319,824,002,011,939,000 | 40.080576 | 79 | 0.620784 | false |
yzl0083/orange | Orange/multilabel/br.py | 6 | 4998 | """
.. index:: Binary Relevance Learner
***************************************
Binary Relevance Learner
***************************************
The most basic problem transformation method for multi-label classification
is the Binary Relevance method.
It learns :math:`|L|` binary classifiers :math:`H_l:X \\rightarrow \{l, \\neg l\}`,
one for each different label :math:`l` in :math:`L`.
It transforms the original data set into :math:`|L|` data sets :math:`D_l`
that contain all examples of the original data set, labelled as
:math:`l` if the labels of the original example contained :math:`l` and
as :math:`\\neg l` otherwise. It is the same solution used in order
to deal with a single-label multi-class problem using a binary classifier.
For more information, see G. Tsoumakas and I. Katakis. `Multi-label classification: An overview
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.104.9401&rep=rep1&type=pdf>`_.
International Journal of Data Warehousing and Mining, 3(3):1-13, 2007.
Note that a copy of the table is made in RAM for each label to enable construction of
a classifier. Due to technical limitations, that is currently unavoidable and
should be remedied in Orange 3.
.. index:: Binary Relevance Learner
.. autoclass:: Orange.multilabel.BinaryRelevanceLearner
:members:
:show-inheritance:
:param instances: a table of instances.
:type instances: :class:`Orange.data.Table`
:param base_learner: the binary learner, the default learner is
:class:`Orange.classification.bayes.NaiveLearner`.
:type base_learner: :class:`Orange.classification.Learner`
.. index:: Binary Relevance Classifier
.. autoclass:: Orange.multilabel.BinaryRelevanceClassifier
:members:
:show-inheritance:
Examples
========
The following example demonstrates a straightforward invocation of
this algorithm (:download:`mlc-classify.py <code/mlc-classify.py>`):
.. literalinclude:: code/mlc-classify.py
:lines: 6, 15-17
"""
import Orange
from Orange.classification.bayes import NaiveLearner as _BayesLearner
import multibase as _multibase
class BinaryRelevanceLearner(_multibase.MultiLabelLearner):
"""
Class that implements the Binary Relevance (BR) method.
"""
def __new__(cls, instances = None, base_learner = None, weight_id = 0, **argkw):
self = _multibase.MultiLabelLearner.__new__(cls, **argkw)
if base_learner:
self.base_learner = base_learner
else:
self.base_learner = _BayesLearner
if instances is not None:
self.__init__(**argkw)
return self.__call__(instances, weight_id)
else:
return self
def __call__(self, instances, weight_id = 0, **kwds):
if not Orange.multilabel.is_multilabel(instances):
raise TypeError("The given data set is not a multi-label data set"
" with class values 0 and 1.")
for k in kwds.keys():
self.__dict__[k] = kwds[k]
classifiers = []
for c in instances.domain.class_vars:
new_domain = Orange.data.Domain(instances.domain.attributes, c)
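            # Projecting the data onto a single class variable copies the
            # table in RAM, as noted in the module docstring above.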
#build the instances
new_table = Orange.data.Table(new_domain, instances)
            classifier = self.base_learner(new_table)
            classifiers.append(classifier)
#Learn from the given table of data instances.
return BinaryRelevanceClassifier(instances = instances,
classifiers = classifiers,
weight_id = weight_id)
class BinaryRelevanceClassifier(_multibase.MultiLabelClassifier):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __call__(self, instance, result_type=Orange.classification.Classifier.GetValue):
"""
:rtype: a list of :class:`Orange.data.Value`, a list of :class:`Orange.statistics.distribution.Distribution`, or a tuple with both
"""
domain = self.instances.domain
labels = []
dists = []
for c in self.classifiers:
v, p = c(instance, Orange.classification.Classifier.GetBoth)
labels.append(v)
dists.append(p)
if result_type == Orange.classification.Classifier.GetValue:
return labels
if result_type == Orange.classification.Classifier.GetProbabilities:
return dists
return labels, dists
#########################################################################################
# A quick test/example.
if __name__ == "__main__":
data = Orange.data.Table("emotions.tab")
classifier = Orange.multilabel.BinaryRelevanceLearner(data,Orange.classification.knn.kNNLearner)
for i in range(10):
c,p = classifier(data[i],Orange.classification.Classifier.GetBoth)
print c,p | gpl-3.0 | -1,425,528,767,165,137,000 | 37.160305 | 138 | 0.622049 | false |
StefanSimis/PyTrios | pytrios/gpslib.py | 1 | 15424 | """
Multithreaded GPS library
Terry C provided this
"""
import datetime
import logging
import re
import threading
import time
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
class GPSParser(object):
"""
Class which contains a parse and checksum method.
Will parse GPGGA, GPRMC, GPVTG, and HCHDG NMEA sentences
"""
@staticmethod
def checksum(sentence):
"""
Check and validate GPS NMEA sentence
:param sentence: NMEA sentence
:type sentence: str
:return: True if checksum is valid, False otherwise
:rtype: bool
"""
sentence = sentence.strip()
match = re.search(r'^\$(.*\*.*)$', sentence)
if match:
sentence = match.group(1)
nmeadata, cksum = re.split(r'\*', sentence)
calc_cksum = 0
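        # The NMEA checksum is the XOR of every character between '$' and '*'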
for char in nmeadata:
calc_cksum ^= ord(char)
return '0x'+cksum.lower() == hex(calc_cksum)
return False
@staticmethod
def parse(gps_string):
"""
Parse a GPS NMEA sentence, Returns NMEA dictionary or None
:param gps_string: NMEA sentence.
:type gps_string: str
:return: Function output or None
"""
if GPSParser.checksum(gps_string):
try:
if gps_string.startswith('$GPGSA'):
return GPSParser.parse_gpgsa(gps_string)
if gps_string.startswith('$GPGGA'):
return GPSParser.parse_gpgga(gps_string)
elif gps_string.startswith('$GPRMC'):
return GPSParser.parse_gprmc(gps_string)
elif gps_string.startswith('$GPVTG'):
return GPSParser.parse_gpvtg(gps_string)
elif gps_string.startswith('$HCHDG'):
return GPSParser.parse_hchdg(gps_string)
except ValueError:
pass
return None
@staticmethod
def parse_gpgsa(gpgsa_string):
"""
Parses a GPGSA sentence. Satellite Status (fix info)
:param gpgsa_string: NMEA Sentence
:type gpgsa_string: str
:return: Returns dictionary with data extracted from the string
:rtype: dict
"""
gps_parts = gpgsa_string.split(',')[1:-1]
# $GPGSA,A,3,04,05,,09,12,,,24,,,,,2.5,1.3,2.1*39
# 3 3D fix - values include: 1 = no fix
# 2 = 2D fix
# 3 = 3D fix
result = {
'type': 'gpgsa',
'fix_quality': int(gps_parts[1])
}
return result
@staticmethod
def parse_gpgga(gpgga_string):
"""
Parses a GPGGA sentence. Essential fix info.
:param gpgga_string: NMEA Sentence
:type gpgga_string: str
:return: Returns dictionary with data extracted from the string
:rtype: dict
"""
gps_parts = gpgga_string.split(',')[1:-1]
lat = int(gps_parts[1][0:2]) + (float(gps_parts[1][2:])/60.0)
lon = int(gps_parts[3][0:3]) + (float(gps_parts[3][3:])/60.0)
if gps_parts[2] == 'S':
lat *= -1
if gps_parts[4] == 'W':
lon *= -1
hour = int(gps_parts[0][0:2])
mins = int(gps_parts[0][2:4])
seconds = int(round(float(gps_parts[0][4:]), ndigits=0))
        date = datetime.datetime.now()
        # datetime.replace returns a new object, so keep the result
        date = date.replace(hour=hour, minute=mins, second=seconds)
result = {
'type': 'gpgga',
'hour': hour,
'min': mins,
'seconds': seconds,
'date': date,
'lat': lat,
'lon': lon,
'alt': float(gps_parts[8]),
'fix_type': int(gps_parts[5])
}
return result
@staticmethod
def parse_gprmc(gprmc_string):
"""
Parses a GPRMC sentence.
:param gprmc_string: NMEA Sentence
:type gprmc_string: str
:return: Returns a dictionary with data extracted from the string.
:rtype: dict
"""
gps_parts = gprmc_string.split(',')[1:-1]
# $GPRMC,113623.12,A,5021.9979,N,00407.9635,W,0.0,358.1,310315,2.2,W,A*3A
# 0 1 2 3 4 5 6 7 8 9 10
hour = int(gps_parts[0][0:2])
mins = int(gps_parts[0][2:4])
seconds = int(round(float(gps_parts[0][4:]), ndigits=0))
day = int(gps_parts[8][0:2])
month = int(gps_parts[8][2:4])
year = int(gps_parts[8][4:6])
if year < 1900:
year += 2000
date = datetime.datetime(year, month, day, hour, mins, seconds)
lat = int(gps_parts[2][0:2]) + (float(gps_parts[2][2:])/60.0)
lon = int(gps_parts[4][0:3]) + (float(gps_parts[4][3:])/60.0)
if gps_parts[3] == 'S':
lat *= -1
if gps_parts[5] == 'W':
lon *= -1
result = {
'type': 'gprmc',
'hour': hour,
'min': mins,
'seconds': seconds,
'day': day,
'month': month,
'year': year,
'date': date,
'lat': lat,
'lon': lon,
'speed': float(gps_parts[6]),
'heading': float(gps_parts[7])
}
return result
@staticmethod
def parse_gpvtg(gpvtg_string):
"""
Parses a GPVTG sentence.
:param gpvtg_string: NMEA Sentence
:type gpvtg_string: str
:return: Returns a dictionary with data extracted from the string.
:rtype: dict
"""
gps_parts = gpvtg_string.split(',')[1:-1]
# $GPVTG,232.7,T,234.9,M,1.3,N,2.4,K,A*2F
# 0 1 2 3 4 5 6 7
result = {
'type': 'gpvtg',
'heading': float(gps_parts[0]),
'speed': float(gps_parts[4])
}
return result
@staticmethod
def parse_hchdg(hchdg_string):
"""
Parses a HCHDG sentence.
:param hchdg_string: NMEA Sentence
:type hchdg_string: str
:return: Returns a dictionary with data extracted from the string.
:rtype: dict
"""
gps_parts = hchdg_string.split(',')[1:-1]
# $HCHDG,359.6,0.0,E,2.2,W*59
# 0 1 2 3
result = {
'type': 'hchdg',
'heading': float(gps_parts[0])
}
return result
class GPSSerialReader(threading.Thread):
"""
Thread to read from a serial port
"""
def __init__(self, serial_port, parent):
threading.Thread.__init__(self)
self.serial_port = serial_port
self.parent = parent
self.observers = []
self.current_gps_dict = None
logger.info("Starting GPS reader thread")
def run(self):
"""
Main loop of the thread.
This will run and read from a GPS string and when it is valid
and decoded it'll be passed via the observer design pattern.
"""
while not self.parent.stop_gps:
gps_string = self.serial_port.readline()
logger.info("NMEA: {0}".format(gps_string.strip()))
self.current_gps_dict = GPSParser.parse(gps_string)
self.notify_observers()
            time.sleep(0.08)  # Sleep for ~80 ms to spare CPU
def register_observer(self, observer):
"""
Register an observer of the GPS thread.
Observers must implement a method called "update"
:param observer: An observer object.
:type observer: object
"""
if observer not in self.observers:
self.observers.append(observer)
def notify_observers(self):
"""
This pushes the GPS dict to all observers.
"""
if self.current_gps_dict is not None:
for observer in self.observers:
observer.update(self.current_gps_dict)
class WatchdogTimer(object):
"""
Simple Watchdog timer.
Timer to call callback function after timer runs out.
"""
def __init__(self, callback, timeout=5):
self.timeout = timeout
self.callback = callback
self.timer = threading.Timer(self.timeout, self.timer_callback)
self.timer.start()
def timer_callback(self):
"""
Calls the passed in callback function and resets the timer
"""
self.callback()
self.reset()
def reset(self):
"""
Stops the timer and starts it again.
"""
self.timer.cancel()
self.timer = threading.Timer(self.timeout, self.timer_callback)
self.timer.start()
def stop(self):
"""
Stops the timer.
"""
self.timer.cancel()
class GPSManager(object):
"""
Main GPS class which oversees the management and reading of GPS ports.
"""
def __init__(self):
self.serial_ports = []
self.stop_gps = False
self.watchdog = None
self.started = False
self.threads = []
self.heading = None
self.lat = None
self.lon = None
self.alt = None
self.speed = None
self.fix_type = 0
self.fix_quality = 0
self.datetime = None
self.old = False
self.proper_compass = False
self.gps_lock = threading.Lock()
self.gps_observers = []
self.watchdog_callbacks = []
def __del__(self):
self.disable_watchdog()
self.stop()
def add_serial_port(self, serial_port):
"""
Add a serial port to the list of ports to read from.
The serial port must be an instance of serial.Serial,
and the open() method must have been called.
:param serial_port: Serial object
:type serial_port: serial.Serial
"""
if serial_port not in self.serial_ports:
self.serial_ports.append(serial_port)
def remove_serial_port(self, serial_port):
"""
Remove serial port from the list of ports to remove.
This wont kill any threads reading serial ports.
Run stop then remove then start again.
:param serial_port: Serial object
:type serial_port: serial.Serial
"""
if serial_port in self.serial_ports:
self.serial_ports.remove(serial_port)
def start(self):
"""
Starts serial reading threads.
"""
if not self.started:
self.started = True
for port in self.serial_ports:
new_thread = GPSSerialReader(port, self)
new_thread.register_observer(self)
self.threads.append(new_thread)
for thread in self.threads:
thread.start()
logger.info("Started GPS manager")
else:
logger.warn("GPS manager already started")
def stop(self):
"""
Tells the serial threads to stop.
"""
self.stop_gps = True
time.sleep(1)
for thread in self.threads:
thread.join(1)
self.threads = []
self.started = False
logger.info("Stopping GPS manager")
def enable_watchdog(self, interval):
"""
Start watchdog timer to callback when gps data > interval secs old.
:param inteval: Number of seconds before GPS data is old.
:type inteval: int
"""
self.watchdog = WatchdogTimer(self.watchdog_callback, interval)
logger.debug("Starting watchdog timer")
def disable_watchdog(self):
"""
Stop watchdog timer.
"""
self.watchdog.stop()
self.watchdog = None
logger.debug("Stopped watchdog timer")
def watchdog_callback(self):
"""
Sets the watchdog field of old to True
"""
self.old = True
for wdg in self.watchdog_callbacks:
wdg()
def update(self, gps_dict):
"""
Updates the gps info held by this class,
a lock is used to prevent corruption.
:param gps_dict: GPS Dictionary passed.
:type gps_dict: dict
"""
self.gps_lock.acquire(True)
if gps_dict is not None:
self.old = False
if self.watchdog is not None:
self.watchdog.reset()
if gps_dict['type'] == 'gpgsa':
self.fix_quality = gps_dict['fix_quality']
elif gps_dict['type'] == 'hchdg':
self.proper_compass = True
self.heading = gps_dict['heading']
elif gps_dict['type'] == 'gpvtg':
# Use track made good? for heading if no proper compass
self.speed = gps_dict['speed']
if not self.proper_compass:
self.heading = gps_dict['heading']
elif gps_dict['type'] == 'gpgga':
self.lat = gps_dict['lat']
self.lon = gps_dict['lon']
self.alt = gps_dict['alt']
self.fix_type = gps_dict['fix_type']
                if self.datetime is not None:
                    # Update if we have date (GPRMC should set that eventually)
                    # datetime.replace returns a new object, so reassign it
                    self.datetime = self.datetime.replace(
                        hour=gps_dict['hour'],
                        minute=gps_dict['min'],
                        second=gps_dict['seconds'])
else:
self.datetime = gps_dict['date']
elif gps_dict['type'] == 'gprmc':
self.lat = gps_dict['lat']
self.lon = gps_dict['lon']
self.datetime = gps_dict['date']
self.speed = gps_dict['speed']
# Use track made good? for heading if no proper compass
if not self.proper_compass:
self.heading = gps_dict['heading']
self.notify_observers()
self.gps_lock.release()
def register_observer(self, gps_object):
"""
Add object to the observing list
:param gps_object: Observing object
:type gps_object: object
"""
if gps_object not in self.gps_observers:
self.gps_observers.append(gps_object)
def remove_observer(self, gps_object):
"""
Remove the given observer from the list of observers
:param gps_object: Observing object
:type gps_object: object
"""
self.gps_observers.remove(gps_object)
def notify_observers(self):
"""
Notify all observers that there is new GPS data.
"""
logger.debug("Update observers")
for gps_object in self.gps_observers:
gps_object.update()
def register_watchdog_callback(self, wdg_callback):
"""
Register a callback function with the watchdog
:param wdg_callback: Callback function
:type wdg_callback: function
"""
if wdg_callback not in self.watchdog_callbacks:
self.watchdog_callbacks.append(wdg_callback)
def remove_watchdog_callback(self, wdg_callback):
"""
Stop the watchdog timer running the given callback
:param wdg_callback: Callback function
:type wdg_callback: function
"""
self.watchdog_callbacks.remove(wdg_callback)
| gpl-3.0 | -3,867,963,287,236,672,500 | 28.661538 | 81 | 0.531055 | false |
Arno-Nymous/pyload | module/plugins/hoster/UserscloudCom.py | 7 | 2796 | # -*- coding: utf-8 -*-
import re
from module.network.CookieJar import CookieJar
from module.network.HTTPRequest import HTTPRequest
# Abort is raised by BIGHTTPRequest.write() below; in pyload 0.4.x it is
# defined in the core Plugin module (this import was missing originally).
from module.plugins.Plugin import Abort
from ..internal.SimpleHoster import SimpleHoster
class BIGHTTPRequest(HTTPRequest):
"""
Overcome HTTPRequest's load() size limit to allow
loading very big web pages by overrding HTTPRequest's write() function
"""
# @TODO: Add 'limit' parameter to HTTPRequest in v0.4.10
def __init__(self, cookies=None, options=None, limit=1000000):
self.limit = limit
HTTPRequest.__init__(self, cookies=cookies, options=options)
def write(self, buf):
""" writes response """
        if (self.limit and self.rep.tell() > self.limit) or self.abort:
rep = self.getResponse()
if self.abort:
raise Abort()
f = open("response.dump", "wb")
f.write(rep)
f.close()
raise Exception("Loaded Url exceeded limit")
self.rep.write(buf)
class UserscloudCom(SimpleHoster):
__name__ = "UserscloudCom"
__type__ = "hoster"
__version__ = "0.09"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?userscloud\.com/(?P<ID>\w{12})'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Userscloud.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
INFO_PATTERN = r'<a href="https://userscloud.com/.+?" target="_blank">(?P<N>.+?) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)</a>'
OFFLINE_PATTERN = r'The file you are trying to download is no longer available'
LINK_FREE_PATTERN = r'<a href="(https://\w+\.usercdn\.com.+?)"'
URL_REPLACEMENTS = [(__pattern__ + '.*', r'https://userscloud.com/\g<ID>')]
def setup(self):
self.multiDL = True
self.resume_download = False
self.chunk_limit = 1
try:
self.req.http.close()
except Exception:
pass
self.req.http = BIGHTTPRequest(
cookies=CookieJar(None),
options=self.pyload.requestFactory.getOptions(),
limit=300000)
def handle_free(self, pyfile):
url, inputs = self.parse_html_form('name="F1"')
if not inputs:
return
self.data = self.load(pyfile.url, post=inputs)
m = re.search(self.LINK_FREE_PATTERN, self.data)
if m is not None:
self.link = m.group(1)
| gpl-3.0 | 2,837,583,721,341,577,700 | 31.894118 | 121 | 0.574034 | false |
perseas/Pyrseas | tests/dbobject/test_textsearch.py | 1 | 11858 | # -*- coding: utf-8 -*-
"""Test text search objects"""
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
CREATE_TSC_STMT = "CREATE TEXT SEARCH CONFIGURATION sd.tsc1 (PARSER = tsp1)"
CREATE_TSD_STMT = "CREATE TEXT SEARCH DICTIONARY sd.tsd1 (TEMPLATE = simple, "\
"stopwords = 'english')"
CREATE_TSP_STMT = "CREATE TEXT SEARCH PARSER sd.tsp1 (START = prsd_start, " \
"GETTOKEN = prsd_nexttoken, END = prsd_end, LEXTYPES = prsd_lextype, " \
"HEADLINE = prsd_headline)"
CREATE_TST_STMT = "CREATE TEXT SEARCH TEMPLATE sd.tst1 (INIT = dsimple_init, "\
"LEXIZE = dsimple_lexize)"
COMMENT_TSC_STMT = "COMMENT ON TEXT SEARCH CONFIGURATION sd.tsc1 IS " \
"'Test configuration tsc1'"
COMMENT_TSD_STMT = "COMMENT ON TEXT SEARCH DICTIONARY sd.tsd1 IS " \
"'Test dictionary tsd1'"
COMMENT_TSP_STMT = "COMMENT ON TEXT SEARCH PARSER sd.tsp1 IS " \
"'Test parser tsp1'"
COMMENT_TST_STMT = "COMMENT ON TEXT SEARCH TEMPLATE sd.tst1 IS " \
"'Test template tst1'"
class TextSearchConfigToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing text search configurations"""
superuser = True
def test_map_ts_config(self):
"Map an existing text search configuration"
stmts = [CREATE_TSP_STMT, CREATE_TSC_STMT]
dbmap = self.to_map(stmts)
assert dbmap['schema sd']['text search configuration tsc1'] == {
'parser': 'tsp1'}
def test_map_cross_schema_ts_config(self):
"Map a text search config with parser in different schema"
stmts = ["CREATE SCHEMA s1",
"CREATE TEXT SEARCH PARSER s1.tsp1 "
"(START = prsd_start, GETTOKEN = prsd_nexttoken, "
"END = prsd_end, LEXTYPES = prsd_lextype)",
"CREATE TEXT SEARCH CONFIGURATION tsc1 (PARSER = s1.tsp1)"]
dbmap = self.to_map(stmts)
assert dbmap['schema sd']['text search configuration tsc1'] == {
'parser': 's1.tsp1'}
def test_map_ts_config_comment(self):
"Map a text search configuration with a comment"
stmts = [CREATE_TSP_STMT, CREATE_TSC_STMT, COMMENT_TSC_STMT]
dbmap = self.to_map(stmts)
assert dbmap['schema sd']['text search configuration tsc1'][
'description'] == 'Test configuration tsc1'
class TextSearchConfigToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input text search configurations"""
def test_create_ts_config(self):
"Create a text search configuration that didn't exist"
inmap = self.std_map()
inmap['schema sd'].update({'text search parser tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype',
'headline': 'prsd_headline'}, 'text search configuration tsc1': {
'parser': 'tsp1'}})
sql = self.to_sql(inmap, [CREATE_TSP_STMT])
assert fix_indent(sql[0]) == CREATE_TSC_STMT
def test_create_ts_config_in_schema(self):
"Create a text search config with parser in non-default schema"
inmap = self.std_map()
inmap.update({'schema s1': {'text search parser tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype'}}})
inmap['schema sd'].update({'text search configuration tsc1': {
'parser': 's1.tsp1'}})
sql = self.to_sql(inmap, ["CREATE SCHEMA s1"])
assert fix_indent(sql[0]) == "CREATE TEXT SEARCH PARSER s1.tsp1 " \
"(START = prsd_start, GETTOKEN = prsd_nexttoken, " \
"END = prsd_end, LEXTYPES = prsd_lextype)"
assert fix_indent(sql[1]) == \
"CREATE TEXT SEARCH CONFIGURATION sd.tsc1 (PARSER = s1.tsp1)"
    def test_bad_map_ts_config(self):
"Error creating a text search configuration with a bad map"
inmap = self.std_map()
inmap['schema sd'].update({'tsc1': {'parser': 'tsp1'}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_drop_ts_config(self):
"Drop an existing text search configuration"
stmts = [CREATE_TSP_STMT, CREATE_TSC_STMT]
sql = self.to_sql(self.std_map(), stmts, superuser=True)
assert sql[0] == "DROP TEXT SEARCH CONFIGURATION sd.tsc1"
assert sql[1] == "DROP TEXT SEARCH PARSER sd.tsp1"
def test_comment_on_ts_config(self):
"Create a comment for an existing text search configuration"
stmts = [CREATE_TSP_STMT, CREATE_TSC_STMT]
inmap = self.std_map()
inmap['schema sd'].update({'text search configuration tsc1': {
'parser': 'tsp1', 'description': "Test configuration tsc1"},
'text search parser tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype',
'headline': 'prsd_headline'}})
sql = self.to_sql(inmap, stmts, superuser=True)
assert sql == [COMMENT_TSC_STMT]
class TextSearchDictToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing text search dictionaries"""
def test_map_ts_dict(self):
"Map an existing text search dictionary"
dbmap = self.to_map([CREATE_TSD_STMT])
assert dbmap['schema sd']['text search dictionary tsd1'] == {
'template': 'simple', 'options': "stopwords = 'english'"}
def test_map_ts_dict_comment(self):
"Map a text search dictionary with a comment"
stmts = [CREATE_TSD_STMT, COMMENT_TSD_STMT]
dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['text search dictionary tsd1'][
            'description'] == 'Test dictionary tsd1'
class TextSearchDictToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input text search dictionaries"""
def test_create_ts_dict(self):
"Create a text search dictionary that didn't exist"
inmap = self.std_map()
inmap['schema sd'].update({'text search dictionary tsd1': {
'template': 'simple', 'options': "stopwords = 'english'"}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_TSD_STMT
def test_bad_map_ts_dict(self):
"Error creating a text search dictionary with a bad map"
inmap = self.std_map()
inmap['schema sd'].update({'tsd1': {
'template': 'simple', 'options': "stopwords = 'english'"}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_drop_ts_dict(self):
"Drop an existing text search dictionary"
sql = self.to_sql(self.std_map(), [CREATE_TSD_STMT])
assert sql == ["DROP TEXT SEARCH DICTIONARY sd.tsd1"]
def test_comment_on_ts_dict(self):
"Create a comment for an existing text search dictionary"
inmap = self.std_map()
inmap['schema sd'].update({'text search dictionary tsd1': {
'template': 'simple', 'options': "stopwords = 'english'",
'description': "Test dictionary tsd1"}})
sql = self.to_sql(inmap, [CREATE_TSD_STMT])
assert sql == [COMMENT_TSD_STMT]
class TextSearchParserToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing text search parsers"""
superuser = True
def test_map_ts_parser(self):
"Map an existing text search parser"
stmts = [CREATE_TSP_STMT]
dbmap = self.to_map(stmts)
assert dbmap['schema sd']['text search parser tsp1'] == {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype',
'headline': 'prsd_headline'}
def test_map_ts_parser_comment(self):
"Map a text search parser with a comment"
stmts = [CREATE_TSP_STMT, COMMENT_TSP_STMT]
dbmap = self.to_map(stmts)
assert dbmap['schema sd']['text search parser tsp1'][
'description'] == 'Test parser tsp1'
class TextSearchParserToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input text search parsers"""
def test_create_ts_parser(self):
"Create a text search parser that didn't exist"
inmap = self.std_map()
inmap['schema sd'].update({'text search parser tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype',
'headline': 'prsd_headline'}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_TSP_STMT
def test_bad_map_ts_parser(self):
"Error creating a text search parser with a bad map"
inmap = self.std_map()
inmap['schema sd'].update({'tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype'}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_drop_ts_parser(self):
"Drop an existing text search parser"
sql = self.to_sql(self.std_map(), [CREATE_TSP_STMT], superuser=True)
assert sql == ["DROP TEXT SEARCH PARSER sd.tsp1"]
def test_comment_on_ts_parser(self):
"Create a comment for an existing text search parser"
inmap = self.std_map()
inmap['schema sd'].update({'text search parser tsp1': {
'start': 'prsd_start', 'gettoken': 'prsd_nexttoken',
'end': 'prsd_end', 'lextypes': 'prsd_lextype',
'headline': 'prsd_headline', 'description': "Test parser tsp1"}})
sql = self.to_sql(inmap, [CREATE_TSP_STMT], superuser=True)
assert sql == [COMMENT_TSP_STMT]
class TextSearchTemplateToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing text search templates"""
superuser = True
def test_map_ts_template(self):
"Map an existing text search template"
dbmap = self.to_map([CREATE_TST_STMT])
assert dbmap['schema sd']['text search template tst1'] == {
'init': 'dsimple_init', 'lexize': 'dsimple_lexize'}
def test_map_ts_template_comment(self):
"Map a text search template with a comment"
stmts = [CREATE_TST_STMT, COMMENT_TST_STMT]
dbmap = self.to_map(stmts)
        assert dbmap['schema sd']['text search template tst1'][
            'description'] == 'Test template tst1'
class TextSearchTemplateToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input text search templates"""
def test_create_ts_template(self):
"Create a text search template that didn't exist"
inmap = self.std_map()
inmap['schema sd'].update({'text search template tst1': {
'init': 'dsimple_init', 'lexize': 'dsimple_lexize'}})
sql = self.to_sql(inmap, superuser=True)
assert fix_indent(sql[0]) == CREATE_TST_STMT
def test_bad_map_ts_template(self):
"Error creating a text search template with a bad map"
inmap = self.std_map()
inmap['schema sd'].update({'tst1': {
'init': 'dsimple_init', 'lexize': 'dsimple_lexize'}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_drop_ts_template(self):
"Drop an existing text search template"
sql = self.to_sql(self.std_map(), [CREATE_TST_STMT], superuser=True)
assert sql == ["DROP TEXT SEARCH TEMPLATE sd.tst1"]
def test_comment_on_ts_template(self):
"Create a comment for an existing text search template"
inmap = self.std_map()
inmap['schema sd'].update({'text search template tst1': {
'init': 'dsimple_init', 'lexize': 'dsimple_lexize',
'description': "Test template tst1"}})
sql = self.to_sql(inmap, [CREATE_TST_STMT], superuser=True)
assert sql == [COMMENT_TST_STMT]
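# Editor's note (hedged sketch, not part of the original suite): the input
# maps these tests build mirror the YAML spec Pyrseas consumes, roughly:
#   schema sd:
#     text search dictionary tsd1:
#       template: simple
#       options: stopwords = 'english'
# std_map() supplies the surrounding boilerplate and to_sql() diffs the map
# against the live catalog, returning the DDL asserted above.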
| bsd-3-clause | 1,286,046,031,707,206,000 | 41.963768 | 79 | 0.616714 | false |
ocefpaf/ulmo | test/usace_rivergages_test.py | 1 | 1420 | import datetime
import ulmo
import test_util
def test_get_stations():
stations_file = 'usace/rivergages/get_stations.cfm'
with test_util.mocked_urls(stations_file):
stations = ulmo.usace.rivergages.get_stations()
assert 1900 <= len(stations) <= 2000
assert 'CE7F42E6' in stations
def test_get_station_parameters():
test_sets = [
('CE7F42E6', {
'HP': u'Pool Level (Ft)',
'PC': u'Cumulative Precipitation (In)'
})
]
for station_code, test_value in test_sets:
stations_file = 'usace/rivergages/parameters_%s.cfm' % station_code
with test_util.mocked_urls(stations_file):
parameters = ulmo.usace.rivergages.get_station_parameters(station_code)
assert parameters == test_value
def test_get_station_data():
test_sets = [
('CE7F42E6', [
(datetime.date(2013, 1, 1), 168.04),
(datetime.date(2013, 1, 15), 168.69)
])
]
for station_code, test_values in test_sets:
stations_file = 'usace/rivergages/data_%s.cfm' % station_code
with test_util.mocked_urls(stations_file):
station_data = ulmo.usace.rivergages.get_station_data('CE7F42E6', 'HP',
start='2013-1-1', end='2013-1-15')
for test_value in test_values:
assert test_value in iter(station_data.items())
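if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): the same calls the tests
    # exercise, but against the live service, so network access is required.
    print(len(ulmo.usace.rivergages.get_stations()))
    print(ulmo.usace.rivergages.get_station_parameters('CE7F42E6'))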
| bsd-3-clause | 2,401,182,916,299,397,600 | 29.212766 | 83 | 0.592254 | false |
rcbops/keystone-buildpackage | keystone/contrib/extensions/service/raxgrp/api.py | 2 | 3825 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystone.backends.api import BaseUserAPI
# Base APIs
class RAXEXTBaseUserAPI(BaseUserAPI):
def get_by_group(self, user_id, group_id):
raise NotImplementedError
def tenant_group(self, values):
raise NotImplementedError
def tenant_group_delete(self, id, group_id):
raise NotImplementedError
def get_groups(self, id):
raise NotImplementedError
def users_tenant_group_get_page(self, group_id, marker, limit):
raise NotImplementedError
def users_tenant_group_get_page_markers(self, group_id, marker, limit):
raise NotImplementedError
def get_group_by_tenant(self, id):
raise NotImplementedError
def delete_tenant_user(self, id, tenant_id):
raise NotImplementedError
def users_get_by_tenant(self, user_id, tenant_id):
raise NotImplementedError
def user_role_add(self, values):
raise NotImplementedError
def user_get_update(self, id):
raise NotImplementedError
def users_get_page(self, marker, limit):
raise NotImplementedError
def users_get_page_markers(self, marker, limit):
raise NotImplementedError
def users_get_by_tenant_get_page(self, tenant_id, marker, limit):
raise NotImplementedError
def users_get_by_tenant_get_page_markers(self, tenant_id, marker, limit):
raise NotImplementedError
def check_password(self, user, password):
raise NotImplementedError
class RAXEXTBaseTenantGroupAPI(object):
def create(self, values):
raise NotImplementedError
def is_empty(self, id):
raise NotImplementedError
def get(self, id, tenant):
raise NotImplementedError
def get_page(self, tenant_id, marker, limit):
raise NotImplementedError
def get_page_markers(self, tenant_id, marker, limit):
raise NotImplementedError
def update(self, id, tenant_id, values):
raise NotImplementedError
def delete(self, id, tenant_id):
raise NotImplementedError
class RAXEXTBaseGroupAPI(object):
def get(self, id):
raise NotImplementedError
def get_users(self, id):
raise NotImplementedError
def get_all(self):
raise NotImplementedError
def get_page(self, marker, limit):
raise NotImplementedError
def get_page_markers(self, marker, limit):
raise NotImplementedError
def delete(self, id):
raise NotImplementedError
def get_by_user_get_page(self, user_id, marker, limit):
raise NotImplementedError
def get_by_user_get_page_markers(self, user_id, marker, limit):
raise NotImplementedError
# API
# TODO(Yogi): Refactor all API to separate classes specific to models.
GROUP = RAXEXTBaseGroupAPI()
TENANT_GROUP = RAXEXTBaseTenantGroupAPI()
USER = RAXEXTBaseUserAPI()
# Function to dynamically set module references.
def set_value(variable_name, value):
if variable_name == 'group':
global GROUP
GROUP = value
elif variable_name == 'tenant_group':
global TENANT_GROUP
TENANT_GROUP = value
elif variable_name == 'user':
global USER
USER = value
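# Editor's addition: a hedged usage sketch, not part of the original module.
# DummyGroupAPI is hypothetical; a real backend would subclass
# RAXEXTBaseGroupAPI and implement all of its methods.
if __name__ == '__main__':
    class DummyGroupAPI(RAXEXTBaseGroupAPI):
        def get(self, id):
            return {'id': id, 'name': 'example'}
    set_value('group', DummyGroupAPI())
    print(GROUP.get('g1'))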
| apache-2.0 | -3,389,350,394,871,357,400 | 26.321429 | 77 | 0.694641 | false |
synopat/pyload | module/plugins/captcha/AdsCaptcha.py | 7 | 2037 | # -*- coding: utf-8 -*-
import random
import re
from ..internal.CaptchaService import CaptchaService
class AdsCaptcha(CaptchaService):
__name__ = "AdsCaptcha"
__type__ = "captcha"
__version__ = "0.14"
__status__ = "testing"
__description__ = """AdsCaptcha captcha service plugin"""
__license__ = "GPLv3"
__authors__ = [("pyLoad Team", "[email protected]")]
CAPTCHAID_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?.*?CaptchaId=(\d+)'
PUBLICKEY_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?.*?PublicKey=([\w\-]+)'
def detect_key(self, data=None):
html = data or self.retrieve_data()
m = re.search(self.PUBLICKEY_PATTERN, html)
n = re.search(self.CAPTCHAID_PATTERN, html)
if m and n:
#: Key is the tuple(PublicKey, CaptchaId)
self.key = (m.group(1).strip(), n.group(1).strip())
self.log_debug("Key: %s | ID: %s" % self.key)
return self.key
else:
self.log_debug("Key or id pattern not found")
return None
def challenge(self, key=None, data=None):
PublicKey, CaptchaId = key or self.retrieve_key(data)
html = self.pyfile.plugin.load("http://api.adscaptcha.com/Get.aspx",
get={'CaptchaId': CaptchaId,
'PublicKey': PublicKey})
try:
challenge = re.search("challenge: '(.+?)',", html).group(1)
server = re.search("server: '(.+?)',", html).group(1)
except AttributeError:
self.fail(_("AdsCaptcha challenge pattern not found"))
self.log_debug("Challenge: %s" % challenge)
return self.result(server, challenge), challenge
def result(self, server, challenge):
result = self.decrypt("%sChallenge.aspx" % server,
get={'cid': challenge, 'dummy': random.random()},
cookies=True,
input_type="jpg")
return result
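if __name__ == "__main__":
    # Hedged sketch (editor's addition): how detect_key's patterns extract
    # the (PublicKey, CaptchaId) pair. The HTML fragment is invented.
    html = ('<script src="http://api.adscaptcha.com/Get.aspx'
            '?CaptchaId=1234&PublicKey=ab12-cd34"></script>')
    print(re.search(AdsCaptcha.CAPTCHAID_PATTERN, html).group(1))  # 1234
    print(re.search(AdsCaptcha.PUBLICKEY_PATTERN, html).group(1))  # ab12-cd34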
| gpl-3.0 | -7,251,964,248,703,332,000 | 34.12069 | 81 | 0.542955 | false |
subramani95/neutron | neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py | 3 | 32512 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from six import moves
from webob import exc
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.common import agent_driver_base
from neutron.tests import base
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron.tests.unit import testlib_api
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
def reset_device_driver():
agent_driver_base.AgentDriverBase.device_driver = None
self.addCleanup(reset_device_driver)
self.mock_importer = mock.patch.object(
agent_driver_base, 'importutils').start()
# needed to reload provider configuration
st_db.ServiceTypeManager._instance = None
agent_driver_base.AgentDriverBase.device_driver = 'dummy'
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=('LOADBALANCER:lbaas:neutron.services.'
'loadbalancer.drivers.common.agent_driver_base.'
'AgentDriverBase:default'))
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = agent_driver_base.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in moves.xrange(3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 3)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
# set the vip inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
# set the pool inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_non_active(self):
with self.pool() as pool:
ctx = context.get_admin_context()
for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], status)
pool['pool']['status'] = status
expected = {
'pool': pool['pool'],
'members': [],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id']
)
self.assertEqual(expected, logical_config)
def test_get_logical_device_active(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# activate objects
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Member, member['member']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id']
)
self.assertEqual(logical_config, expected)
def test_get_logical_device_inactive_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Member,
member['member']['id'],
'INACTIVE')
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
member['member']['status'] = constants.INACTIVE
self.assertEqual([member['member']],
logical_config['members'])
def test_get_logical_device_pending_create_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
member = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE',
member['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
self.assertEqual([member], logical_config['members'])
def test_get_logical_device_pending_create_health_monitor(self):
with self.health_monitor() as monitor:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.create_pool_health_monitor(
ctx, monitor, pool['pool']['id'])
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id'])
monitor = self.plugin_instance.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.assertEqual(
'PENDING_CREATE',
pool['health_monitors_status'][0]['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'])
self.assertEqual([monitor],
logical_config['healthmonitors'])
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in expected.iteritems():
self.assertEqual(db_port[k], v)
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_plug_vip_port_mock_with_host(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True,
portbindings.HOST_ID: 'host'
}
with mock.patch.object(
self.plugin._core_plugin, 'update_port') as mock_update_port:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.callbacks.plug_vip_port(
ctx, port_id=vip['vip']['port_id'], host='host')
mock_update_port.assert_called_once_with(
ctx, vip['vip']['port_id'],
{'port': testlib_api.SubDictMatch(exp)})
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
def test_pool_deployed(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('PENDING_CREATE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('PENDING_CREATE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE', m['status'])
self.callbacks.pool_deployed(ctx, pool['pool']['id'])
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('ACTIVE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('ACTIVE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('ACTIVE', m['status'])
def test_update_status_pool(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('PENDING_CREATE', p['status'])
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('ACTIVE', p['status'])
def test_update_status_pool_deleted_already(self):
with mock.patch.object(agent_driver_base, 'LOG') as mock_log:
pool_id = 'deleted_pool'
ctx = context.get_admin_context()
self.assertRaises(loadbalancer.PoolNotFound,
self.plugin_instance.get_pool, ctx, pool_id)
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
self.assertTrue(mock_log.warning.called)
def test_update_status_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool()
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
hm_id = hm['health_monitor']['id']
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('PENDING_CREATE', h['status'])
self.callbacks.update_status(
ctx, 'health_monitor',
{'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('ACTIVE', h['status'])
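# Editor's note (hedged sketch, not part of the original suite): the
# "logical device" handed to agents is a plain dict whose shape the tests
# above pin down, roughly:
#   {'pool': {...}, 'vip': {...}, 'members': [...],
#    'healthmonitors': [...], 'driver': 'dummy'}
# with 'vip' present only once a VIP has been configured.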
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.api = agent_driver_base.LoadBalancerAgentApi('topic')
self.mock_cast = mock.patch.object(self.api, 'cast').start()
self.mock_msg = mock.patch.object(self.api, 'make_msg').start()
def test_init(self):
self.assertEqual(self.api.topic, 'topic')
def _call_test_helper(self, method_name, method_args):
rv = getattr(self.api, method_name)(mock.sentinel.context,
host='host',
**method_args)
self.assertEqual(rv, self.mock_cast.return_value)
self.mock_cast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic.host',
version=None
)
if method_name == 'agent_updated':
method_args = {'payload': method_args}
self.mock_msg.assert_called_once_with(
method_name,
**method_args
)
def test_agent_updated(self):
self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
def test_create_pool(self):
self._call_test_helper('create_pool', {'pool': 'test',
'driver_name': 'dummy'})
def test_update_pool(self):
self._call_test_helper('update_pool', {'old_pool': 'test',
'pool': 'test'})
def test_delete_pool(self):
self._call_test_helper('delete_pool', {'pool': 'test'})
def test_create_vip(self):
self._call_test_helper('create_vip', {'vip': 'test'})
def test_update_vip(self):
self._call_test_helper('update_vip', {'old_vip': 'test',
'vip': 'test'})
def test_delete_vip(self):
self._call_test_helper('delete_vip', {'vip': 'test'})
def test_create_member(self):
self._call_test_helper('create_member', {'member': 'test'})
def test_update_member(self):
self._call_test_helper('update_member', {'old_member': 'test',
'member': 'test'})
def test_delete_member(self):
self._call_test_helper('delete_member', {'member': 'test'})
def test_create_monitor(self):
self._call_test_helper('create_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
def test_update_monitor(self):
self._call_test_helper('update_pool_health_monitor',
{'old_health_monitor': 'test',
'health_monitor': 'test',
'pool_id': 'test'})
def test_delete_monitor(self):
self._call_test_helper('delete_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
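# Editor's note (hedged, grounded in _call_test_helper above): every proxy
# method in this API reduces to a single fire-and-forget cast, e.g.
#   api.create_pool(ctx, pool='test', driver_name='dummy', host='host') ->
#       cast(ctx, make_msg('create_pool', pool='test', driver_name='dummy'),
#            topic='topic.host', version=None)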
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(agent_driver_base, 'LOG')
api_cls = mock.patch.object(agent_driver_base,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.mock_get_driver = mock.patch.object(self.plugin_instance,
'_get_driver')
self.mock_get_driver.return_value = (agent_driver_base.
AgentDriverBase(
self.plugin_instance
))
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.create_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
ctx = context.get_admin_context()
old_vip = vip['vip'].copy()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.update_vip.assert_called_once_with(
mock.ANY,
old_vip,
new_vip,
'host'
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet, no_delete=True) as vip:
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
vip['vip']['status'] = 'PENDING_DELETE'
self.mock_api.delete_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_create_pool(self):
with self.pool() as pool:
self.mock_api.create_pool.assert_called_once_with(
mock.ANY,
pool['pool'],
mock.ANY,
'dummy'
)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, orig_pool, 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, orig_pool, updated, 'host')
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
old_pool = pool['pool'].copy()
old_pool['vip_id'] = vip['vip']['id']
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, old_pool, updated, 'host')
def test_delete_pool(self):
with self.pool(no_delete=True) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
pool['pool']['status'] = 'PENDING_DELETE'
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, pool['pool'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
self.mock_api.create_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.update_member.assert_called_once_with(
mock.ANY, member['member'], updated, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
self.mock_api.create_member.reset_mock()
ctx = context.get_admin_context()
old_member = member['member'].copy()
member['member']['pool_id'] = pool2_id
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, old_member, 'host')
self.mock_api.create_member.assert_called_once_with(
mock.ANY, updated, 'host')
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
no_delete=True) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
member['member']['status'] = 'PENDING_DELETE'
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_create_pool_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool(),
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
hm['pools'][0]['status'] = 'PENDING_DELETE'
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['id'], pool_id)
self.mock_api.delete_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with contextlib.nested(
self.health_monitor(type='HTTP'),
self.pool()
) as (monitor, pool):
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
# hm now has a ref to the pool with which it is associated
ctx = context.get_admin_context()
hm = self.plugin.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
updated = hm.copy()
updated.update(data['health_monitor'])
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.update_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
updated,
pool['pool']['id'],
'host')
| apache-2.0 | 4,506,871,249,298,449,400 | 42.176627 | 79 | 0.495417 | false |
xiaolonw/fast-rcnn_flow2 | lib/datasets/factory.py | 1 | 1145 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
__sets = {}
# import datasets.pascal_voc
import datasets.imagenet_vid
import numpy as np
"""def _selective_search_IJCV_top_k(split, year, top_k):
imdb = datasets.pascal_voc(split, year)
imdb.roidb_handler = imdb.selective_search_IJCV_roidb
imdb.config['top_k'] = top_k
return imdb
"""
year = '2015'
# Set up voc_<year>_<split> using selective search "fast" mode
for split in ['train']: #, 'val', 'trainval', 'test']:
    name = 'vid_{}'.format(split)
__sets[name] = (lambda split=split, year=year:
datasets.imagenet_vid(split, year))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return __sets.keys()
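if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): list the registered imdbs and
    # fetch one by name; only 'vid_train' is registered above.
    print(list_imdbs())
    print(get_imdb('vid_train'))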
| mit | 5,836,217,457,901,340,000 | 27.625 | 62 | 0.582533 | false |
Inspq/ansible | lib/ansible/modules/windows/win_environment.py | 32 | 3191 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_environment
version_added: "2.0"
short_description: Modifies environment variables on Windows hosts.
description:
    - Uses .NET Environment to set or remove environment variables and can set at User, Machine or Process level.
- User level environment variables will be set, but not available until the user has logged off and on again.
options:
state:
description:
- present to ensure environment variable is set, or absent to ensure it is removed
required: false
default: present
choices:
- present
- absent
name:
description:
- The name of the environment variable
required: true
default: no default
value:
description:
- The value to store in the environment variable. Can be omitted for state=absent
required: false
default: no default
level:
description:
- The level at which to set the environment variable.
- Use 'machine' to set for all users.
- Use 'user' to set for the current user that ansible is connected as.
- Use 'process' to set for the current process. Probably not that useful.
required: true
default: no default
choices:
- machine
- process
- user
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- This module is best-suited for setting the entire value of an
environment variable. For safe element-based management of
path-like environment vars, use the M(win_path) module.
- This module does not broadcast change events.
This means that the minority of windows applications which can have
their environment changed without restarting will not be notified and
therefore will need restarting to pick up new environment settings.
User level environment variables will require the user to log out
and in again before they become available.
'''
EXAMPLES = r'''
# Set an environment variable for all users
win_environment:
state: present
name: TestVariable
value: Test value
level: machine
# Remove an environment variable for the current user
win_environment:
state: absent
name: TestVariable
level: user
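# (Editor's addition, hedged) Set a variable only for the current process;
# the 'process' level is documented above but not shown in the original examples
win_environment:
  state: present
  name: TestVariable
  value: Test value
  level: process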
'''
| gpl-3.0 | 8,352,287,211,134,466,000 | 32.946809 | 113 | 0.705108 | false |
amonapp/amon | amon/apps/dashboards/urls.py | 2 | 1600 | from django.conf.urls import url
from amon.apps.dashboards import views
from amon.apps.dashboards import api
urlpatterns = (
url(r'^$', views.index, name='dashboards'),
url(r'^create/$', views.create_dashboard, name='create_dashboard'),
url(r'^edit/(?P<dashboard_id>\w+)/$', views.edit_dashboard, name='edit_dashboard'),
url(r'^reorder/(?P<dashboard_id>\w+)/$', views.reorder_dashboard, name='reorder_dashboard'),
url(r'^view/(?P<dashboard_id>\w+)/$', views.view_dashboard, name='view_dashboard'),
url(r'^delete/(?P<dashboard_id>\w+)/$', views.delete_dashboard, name='delete_dashboard'),
# Ajax
url(r'^a/edit_dashboard/(?P<dashboard_id>\w+)/$', api.edit_dashboard, name='ajax_dashboard_edit'),
url(r'^a/reorder_metrics/(?P<dashboard_id>\w+)/$', api.reorder_metrics, name='ajax_dashboard_reorder_metrics'),
url(r'^a/add_metric/(?P<dashboard_id>\w+)/$', api.add_metric, name='ajax_dashboard_add_metric'),
url(r'^a/remove_metric/$', api.remove_metric, name='ajax_dashboard_remove_metric'),
url(r'^a/get_all_metrics/(?P<dashboard_id>\w+)/$', api.get_all_metrics, name='ajax_dashboard_get_all_metrics'),
url(r'^a/get_server_metrics/$', api.get_server_metrics, name='ajax_dashboard_get_server_metrics'),
# Metric views
url(r'^chart/(?P<metric_id>\w+)/$', api.dashboard_metric, name='dashboard_metric'),
# Public
url(r'^public/charts/(?P<metric_id>\w+)/$', api.public_dashboard_metric, name='public_dashboard_metric'),
url(r'^(?P<account_id>\w+)/(?P<dashboard_id>\w+)/$', views.public_dashboard, name='public_dashboard'),
)
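# Editor's addition: a hedged sketch of resolving one of these named routes;
# the dashboard id below is a placeholder.
#   from django.core.urlresolvers import reverse
#   reverse('view_dashboard', kwargs={'dashboard_id': 'abc123'})
#   # -> '.../view/abc123/', relative to where this urlconf is included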
| agpl-3.0 | 4,939,081,914,387,220,000 | 46.058824 | 115 | 0.663125 | false |
jblackburne/scikit-learn | sklearn/datasets/mldata.py | 8 | 7848 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
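# Worked example (editor's addition): mldata_filename('datasets-UCI iris')
# returns 'datasets-uci-iris', which matches the cache name mocked in
# setup_module below.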
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause | -5,963,252,273,811,201,000 | 31.7 | 79 | 0.623089 | false |
iCarto/siga | extScripting/scripts/jython/Lib/xml/dom/TreeWalker.py | 3 | 6802 | ########################################################################
#
# File Name: TreeWalker.py
#
# Documentation: http://docs.4suite.com/4DOM/TreeWalker.py.html
#
"""
Tree Walker from DOM Level 2. Allows multi-directional iteration over nodes.
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from NodeFilter import NodeFilter
from xml.dom import NoModificationAllowedErr
from xml.dom import NotSupportedErr
class TreeWalker:
def __init__(self, root, whatToShow, filter, expandEntityReferences):
self.__dict__['__root'] = root
self.__dict__['__whatToShow'] = whatToShow
self.__dict__['__filter'] = filter
self.__dict__['__expandEntityReferences'] = expandEntityReferences
self.__dict__['__currentNode'] = root
### Attribute Access Methods -- xxx.attr ###
def __getattr__(self, name):
attrFunc = self._readComputedAttrs.get(name)
if attrFunc:
return attrFunc(self)
def __setattr__(self, name, value):
#Make sure attribute is not read-only
if name in self.__class__._readOnlyAttrs:
raise NoModificationAllowedErr()
#If it's computed execute that function
attrFunc = self.__class__._writeComputedAttrs.get(name)
if attrFunc:
attrFunc(self, value)
#Otherwise, just set the attribute
else:
self.__dict__[name] = value
### Attribute Methods -- xxx._get_attr() ###
def _get_root(self):
return self.__dict__['__root']
def _get_filter(self):
return self.__dict__['__filter']
def _get_whatToShow(self):
return self.__dict__['__whatToShow']
def _get_expandEntityReferences(self):
return self.__dict__['__expandEntityReferences']
def _get_currentNode(self):
return self.__dict__['__currentNode']
def _set_currentNode(self, value):
if value == None:
raise NotSupportedErr()
self.__dict__['__currentNode'] = value
### Methods ###
def parentNode(self):
next_node = None
if self.__dict__['__currentNode'] != self.__dict__['__root']:
next_node = self.__dict__['__currentNode']._get_parentNode()
while next_node and next_node != self.__dict__['__root'] \
and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_parentNode()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def firstChild(self):
next_node = None
if self.__checkFilter(self.__dict__['__currentNode']) != NodeFilter.FILTER_REJECT:
next_node = self.__dict__['__currentNode']._get_firstChild()
while next_node and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_nextSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def lastChild(self):
next_node = None
if self.__checkFilter(self.__dict__['__currentNode']) != NodeFilter.FILTER_REJECT:
next_node = self.__dict__['__currentNode']._get_lastChild()
while next_node and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_previousSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def previousSibling(self):
prev_node = None
        if self.__dict__['__currentNode'] != self.__dict__['__root']:
prev_node = self.__dict__['__currentNode']._get_previousSibling()
while prev_node and not (self.__checkWhatToShow(prev_node) \
and self.__checkFilter(prev_node) == NodeFilter.FILTER_ACCEPT):
prev_node = prev_node._get_previousSibling()
if prev_node:
self.__dict__['__currentNode'] = prev_node
return prev_node
def nextSibling(self):
next_node = None
        if self.__dict__['__currentNode'] != self.__dict__['__root']:
next_node = self.__dict__['__currentNode']._get_nextSibling()
while next_node and not (self.__checkWhatToShow(next_node) and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_nextSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def nextNode(self):
next_node = self.__advance()
while next_node and not (self.__checkWhatToShow(next_node) and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = self.__advance()
return next_node
def previousNode(self):
prev_node = self.__regress()
while prev_node and not (self.__checkWhatToShow(prev_node) and self.__checkFilter(prev_node) == NodeFilter.FILTER_ACCEPT):
prev_node = self.__regress()
return prev_node
def __advance(self):
if self.firstChild():
return self.__dict__['__currentNode']
if self.nextSibling():
return self.__dict__['__currentNode']
if self.parentNode():
return self.nextSibling()
return None
def __regress(self):
if self.previousSibling():
self.lastChild()
return self.__dict__['__currentNode']
if self.parentNode():
return self.__dict__['__currentNode']
return None
def __checkWhatToShow(self, node):
show_bit = 1 << (node._get_nodeType() - 1)
return self.__dict__['__whatToShow'] & show_bit
def __checkFilter(self, node):
if self.__dict__['__filter']:
return self.__dict__['__filter'].acceptNode(node)
else:
return NodeFilter.FILTER_ACCEPT
### Attribute Access Mappings ###
_readComputedAttrs = {'root':_get_root,
'whatToShow':_get_whatToShow,
'filter':_get_filter,
'expandEntityReferences':_get_expandEntityReferences,
'currentNode':_get_currentNode
}
_writeComputedAttrs = {'currentNode': _set_currentNode
}
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
_readComputedAttrs.keys())
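# Editor's addition: a hedged usage sketch, not part of the original module.
# Construction of `doc` is elided; any 4DOM Document node should work.
#
#     walker = TreeWalker(doc, NodeFilter.SHOW_ELEMENT, None, 0)
#     node = walker.nextNode()
#     while node:
#         print node._get_nodeName()
#         node = walker.nextNode()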
| gpl-3.0 | -3,414,826,039,823,183,000 | 36.169399 | 134 | 0.567627 | false |
rockstor/rockstor-core | src/rockstor/storageadmin/south_migrations/0043_auto__add_field_emailclient_port.py | 5 | 43269 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'EmailClient.port'
db.add_column(u'storageadmin_emailclient', 'port',
self.gf('django.db.models.fields.IntegerField')(default=587),
keep_default=False)
def backwards(self, orm):
# Deleting field 'EmailClient.port'
db.delete_column(u'storageadmin_emailclient', 'port')
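    # Editor's note (hedged): with South, this change is applied with
    #   python manage.py migrate storageadmin
    # and reversed by migrating back to migration 0042.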
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oauth2_provider.application': {
'Meta': {'object_name': 'Application'},
'authorization_grant_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "u'2Din3x7H84XawtNaSik1jSdKv2wSMpKP8vmSaSFV'", 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'client_secret': ('django.db.models.fields.CharField', [], {'default': "u'VxffZ3DckHg9OC2QSPFyy6urLxs4pZzyLVNkcOFJVkCHSjVtkib6ljRZLHst77m9ztEmU6VusbuH0GlB3rjgUEBLEG6xpsU1VClYVM3ncryZvAlkh3plwAH8shoyrBd9'", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'client_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'skip_authorization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'oauth2_provider_application'", 'to': u"orm['auth.User']"})
},
'storageadmin.advancednfsexport': {
'Meta': {'object_name': 'AdvancedNFSExport'},
'export_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'storageadmin.apikeys': {
'Meta': {'object_name': 'APIKeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'storageadmin.appliance': {
'Meta': {'object_name': 'Appliance'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'client_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'current_appliance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "'Rockstor'", 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'mgmt_port': ('django.db.models.fields.IntegerField', [], {'default': '443'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'storageadmin.configbackup': {
'Meta': {'object_name': 'ConfigBackup'},
'config_backup': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'storageadmin.containeroption': {
'Meta': {'object_name': 'ContainerOption'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
'storageadmin.dashboardconfig': {
'Meta': {'object_name': 'DashboardConfig'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'widgets': ('django.db.models.fields.CharField', [], {'max_length': '4096'})
},
'storageadmin.dcontainer': {
'Meta': {'object_name': 'DContainer'},
'dimage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DImage']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'launch_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'}),
'rockon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.RockOn']"}),
'uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'storageadmin.dcontainerenv': {
'Meta': {'unique_together': "(('container', 'key'),)", 'object_name': 'DContainerEnv'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'storageadmin.dcontainerlink': {
'Meta': {'unique_together': "(('destination', 'name'),)", 'object_name': 'DContainerLink'},
'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'destination_container'", 'to': "orm['storageadmin.DContainer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'source': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.DContainer']", 'unique': 'True'})
},
'storageadmin.dcustomconfig': {
'Meta': {'unique_together': "(('rockon', 'key'),)", 'object_name': 'DCustomConfig'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'rockon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.RockOn']"}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'storageadmin.dimage': {
'Meta': {'object_name': 'DImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.disk': {
'Meta': {'object_name': 'Disk'},
'btrfs_uuid': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parted': ('django.db.models.fields.BooleanField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'smart_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'smart_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'smart_options': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'transport': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'storageadmin.dport': {
'Meta': {'unique_together': "(('container', 'containerp'),)", 'object_name': 'DPort'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
'containerp': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'hostp': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'hostp_default': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'uiport': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.dvolume': {
'Meta': {'unique_together': "(('container', 'dest_dir'),)", 'object_name': 'DVolume'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dest_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'min_size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']", 'null': 'True'}),
'uservol': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.emailclient': {
'Meta': {'object_name': 'EmailClient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '587'}),
'receiver': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'smtp_server': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.group': {
'Meta': {'object_name': 'Group'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'groupname': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'storageadmin.installedplugin': {
'Meta': {'object_name': 'InstalledPlugin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin_meta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Plugin']"})
},
'storageadmin.iscsitarget': {
'Meta': {'object_name': 'IscsiTarget'},
'dev_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'dev_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'tid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'tname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'storageadmin.netatalkshare': {
'Meta': {'object_name': 'NetatalkShare'},
'description': ('django.db.models.fields.CharField', [], {'default': "'afp on rockstor'", 'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'netatalkshare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"}),
'time_machine': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'})
},
'storageadmin.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'autoconnect': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'ctype': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'dname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'dns_servers': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dspeed': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'dtype': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'itype': ('django.db.models.fields.CharField', [], {'default': "'io'", 'max_length': '100'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'netmask': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'storageadmin.nfsexport': {
'Meta': {'object_name': 'NFSExport'},
'export_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.NFSExportGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"})
},
'storageadmin.nfsexportgroup': {
'Meta': {'object_name': 'NFSExportGroup'},
'admin_host': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'editable': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '2'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'host_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount_security': ('django.db.models.fields.CharField', [], {'default': "'insecure'", 'max_length': '8'}),
'nohide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'syncable': ('django.db.models.fields.CharField', [], {'default': "'async'", 'max_length': '5'})
},
'storageadmin.oauthapp': {
'Meta': {'object_name': 'OauthApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oauth2_provider.Application']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.User']"})
},
'storageadmin.plugin': {
'Meta': {'object_name': 'Plugin'},
'css_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'})
},
'storageadmin.pool': {
'Meta': {'object_name': 'Pool'},
'compression': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mnt_options': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'raid': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.poolbalance': {
'Meta': {'object_name': 'PoolBalance'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'}),
'tid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'})
},
'storageadmin.poolscrub': {
'Meta': {'object_name': 'PoolScrub'},
'corrected_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'csum_discards': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'csum_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'data_extents_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kb_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'last_physical': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'malloc_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'no_csum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'read_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'}),
'super_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tree_bytes_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'tree_extents_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'uncorrectable_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unverified_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'verify_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storageadmin.posixacls': {
'Meta': {'object_name': 'PosixACLs'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'perms': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.rockon': {
'Meta': {'object_name': 'RockOn'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'https': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'more_info': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'ui': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'volume_add_support': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'})
},
'storageadmin.sambacustomconfig': {
'Meta': {'object_name': 'SambaCustomConfig'},
'custom_config': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.sambashare': {
'Meta': {'object_name': 'SambaShare'},
'browsable': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'comment': ('django.db.models.fields.CharField', [], {'default': "'foo bar'", 'max_length': '100'}),
'guest_ok': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'read_only': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'shadow_copy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sambashare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"}),
'snapshot_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'storageadmin.setup': {
'Meta': {'object_name': 'Setup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setup_disks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_network': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.sftp': {
'Meta': {'object_name': 'SFTP'},
'editable': ('django.db.models.fields.CharField', [], {'default': "'ro'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.Share']", 'unique': 'True'})
},
'storageadmin.share': {
'Meta': {'object_name': 'Share'},
'compression_algo': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'eusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'group': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'owner': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
'perms': ('django.db.models.fields.CharField', [], {'default': "'755'", 'max_length': '9'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'pqgroup': ('django.db.models.fields.CharField', [], {'default': "'-1/-1'", 'max_length': '32'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'replica': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'subvol_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.smartattribute': {
'Meta': {'object_name': 'SMARTAttribute'},
'aid': ('django.db.models.fields.IntegerField', [], {}),
'atype': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'failed': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'flag': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'normed_value': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'raw_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'threshold': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'worst': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storageadmin.smartcapability': {
'Meta': {'object_name': 'SMARTCapability'},
'capabilities': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'flag': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.smarterrorlog': {
'Meta': {'object_name': 'SMARTErrorLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'line': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.smarterrorlogsummary': {
'Meta': {'object_name': 'SMARTErrorLogSummary'},
'details': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'error_num': ('django.db.models.fields.IntegerField', [], {}),
'etype': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'lifetime_hours': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'storageadmin.smartidentity': {
'Meta': {'object_name': 'SMARTIdentity'},
'assessment': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'ata_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'device_model': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'enabled': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'firmware_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_smartdb': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'model_family': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'rotation_rate': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sata_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scanned_on': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sector_size': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'supported': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'world_wide_name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'storageadmin.smartinfo': {
'Meta': {'object_name': 'SMARTInfo'},
'disk': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Disk']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'storageadmin.smarttestlog': {
'Meta': {'object_name': 'SMARTTestLog'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'lba_of_first_error': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'lifetime_hours': ('django.db.models.fields.IntegerField', [], {}),
'pct_completed': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'test_num': ('django.db.models.fields.IntegerField', [], {})
},
'storageadmin.smarttestlogdetail': {
'Meta': {'object_name': 'SMARTTestLogDetail'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'line': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.snapshot': {
'Meta': {'unique_together': "(('share', 'name'),)", 'object_name': 'Snapshot'},
'eusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'real_name': ('django.db.models.fields.CharField', [], {'default': "'unknownsnap'", 'max_length': '4096'}),
'rusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'snap_type': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '64'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uvisible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.supportcase': {
'Meta': {'object_name': 'SupportCase'},
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'zipped_log': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.tlscertificate': {
'Meta': {'object_name': 'TLSCertificate'},
'certificate': ('django.db.models.fields.CharField', [], {'max_length': '12288', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '12288', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'})
},
'storageadmin.updatesubscription': {
'Meta': {'object_name': 'UpdateSubscription'},
'appliance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Appliance']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'storageadmin.user': {
'Meta': {'object_name': 'User'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Group']", 'null': 'True', 'blank': 'True'}),
'homedir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True', 'blank': 'True'}),
'shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'smb_shares': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_users'", 'null': 'True', 'to': "orm['storageadmin.SambaShare']"}),
'uid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'suser'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'})
}
}
complete_apps = ['storageadmin'] | gpl-3.0 | -1,445,510,853,986,478,800 | 77.816029 | 277 | 0.543322 | false |
3dcauldron/repo-to-rename-2 | userapi/views.py | 1 | 2060 | from users.models import User
from django.http import Http404
from userapi.serializers import UserSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAdminUser
# Created using the tutorial at http://www.django-rest-framework.org/#tutorial
class UserList(APIView):
"""
List all users, or create a new user.
"""
permission_classes = [IsAdminUser]
def get(self, request, format=None):
users = User.objects.all()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
def post(self, request, format=None):
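        # request.DATA is the pre-3.0 Django REST framework spelling; newer
        # releases renamed it to request.data.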
serializer = UserSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        # UserList has no get_object helper, so look the user up directly.
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404
        user.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserDetail(APIView):
"""
Retrieve, update or delete a user instance.
"""
permission_classes = [IsAdminUser]
def get_object(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
user = self.get_object(pk)
        serializer = UserSerializer(user)
        return Response(serializer.data)
def put(self, request, pk, format=None):
user = self.get_object(pk)
serializer = UserSerializer(user, data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
user = self.get_object(pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| mit | 5,640,777,558,705,504,000 | 33.333333 | 78 | 0.668447 | false |
AtsushiSakai/PythonRobotics | PathPlanning/ProbabilisticRoadMap/probabilistic_road_map.py | 1 | 7382 | """
Probabilistic Road Map (PRM) Planner
author: Atsushi Sakai (@Atsushi_twi)
"""
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
# parameter
N_SAMPLE = 500 # number of sample_points
N_KNN = 10 # number of edge from one sampled point
MAX_EDGE_LEN = 30.0 # [m] Maximum edge length
show_animation = True
class Node:
"""
Node class for dijkstra search
"""
def __init__(self, x, y, cost, parent_index):
self.x = x
self.y = y
self.cost = cost
self.parent_index = parent_index
def __str__(self):
return str(self.x) + "," + str(self.y) + "," +\
str(self.cost) + "," + str(self.parent_index)
def prm_planning(sx, sy, gx, gy, ox, oy, rr):
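    """
    Run PRM planning: sample collision-free points, connect them into a road
    map and search it with Dijkstra.
    sx, sy: start position [m]
    gx, gy: goal position [m]
    ox, oy: obstacle x/y position lists [m]
    rr: robot radius [m]
    """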
obstacle_kd_tree = cKDTree(np.vstack((ox, oy)).T)
sample_x, sample_y = sample_points(sx, sy, gx, gy,
rr, ox, oy, obstacle_kd_tree)
if show_animation:
plt.plot(sample_x, sample_y, ".b")
road_map = generate_road_map(sample_x, sample_y, rr, obstacle_kd_tree)
rx, ry = dijkstra_planning(
sx, sy, gx, gy, road_map, sample_x, sample_y)
return rx, ry
def is_collision(sx, sy, gx, gy, rr, obstacle_kd_tree):
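    # Step along the segment from (sx, sy) towards (gx, gy) in increments of
    # the robot radius, querying the obstacle KD-tree at each step; any point
    # closer than rr to an obstacle (or an edge longer than MAX_EDGE_LEN) is
    # treated as a collision.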
x = sx
y = sy
dx = gx - sx
dy = gy - sy
yaw = math.atan2(gy - sy, gx - sx)
d = math.hypot(dx, dy)
if d >= MAX_EDGE_LEN:
return True
D = rr
n_step = round(d / D)
for i in range(n_step):
dist, _ = obstacle_kd_tree.query([x, y])
if dist <= rr:
return True # collision
x += D * math.cos(yaw)
y += D * math.sin(yaw)
# goal point check
dist, _ = obstacle_kd_tree.query([gx, gy])
if dist <= rr:
return True # collision
return False # OK
def generate_road_map(sample_x, sample_y, rr, obstacle_kd_tree):
"""
Road map generation
sample_x: [m] x positions of sampled points
sample_y: [m] y positions of sampled points
rr: Robot Radius[m]
obstacle_kd_tree: KDTree object of obstacles
"""
road_map = []
n_sample = len(sample_x)
sample_kd_tree = cKDTree(np.vstack((sample_x, sample_y)).T)
for (i, ix, iy) in zip(range(n_sample), sample_x, sample_y):
dists, indexes = sample_kd_tree.query([ix, iy], k=n_sample)
edge_id = []
for ii in range(1, len(indexes)):
nx = sample_x[indexes[ii]]
ny = sample_y[indexes[ii]]
if not is_collision(ix, iy, nx, ny, rr, obstacle_kd_tree):
edge_id.append(indexes[ii])
if len(edge_id) >= N_KNN:
break
road_map.append(edge_id)
# plot_road_map(road_map, sample_x, sample_y)
return road_map
def dijkstra_planning(sx, sy, gx, gy, road_map, sample_x, sample_y):
"""
s_x: start x position [m]
s_y: start y position [m]
gx: goal x position [m]
gy: goal y position [m]
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
rr: robot radius [m]
road_map: ??? [m]
sample_x: ??? [m]
sample_y: ??? [m]
@return: Two lists of path coordinates ([x1, x2, ...], [y1, y2, ...]), empty list when no path was found
"""
start_node = Node(sx, sy, 0.0, -1)
goal_node = Node(gx, gy, 0.0, -1)
open_set, closed_set = dict(), dict()
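    # sample_points appends the start point second-to-last and the goal point
    # last, so their indexes in the road map are len-2 and len-1.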
open_set[len(road_map) - 2] = start_node
path_found = True
while True:
if not open_set:
print("Cannot find path")
path_found = False
break
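        # Extract the open node with the smallest path cost so far (a linear
        # scan; a priority queue would be the usual optimisation).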
c_id = min(open_set, key=lambda o: open_set[o].cost)
current = open_set[c_id]
# show graph
if show_animation and len(closed_set.keys()) % 2 == 0:
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.plot(current.x, current.y, "xg")
plt.pause(0.001)
if c_id == (len(road_map) - 1):
print("goal is found!")
goal_node.parent_index = current.parent_index
goal_node.cost = current.cost
break
# Remove the item from the open set
del open_set[c_id]
# Add it to the closed set
closed_set[c_id] = current
# expand search grid based on motion model
for i in range(len(road_map[c_id])):
n_id = road_map[c_id][i]
dx = sample_x[n_id] - current.x
dy = sample_y[n_id] - current.y
d = math.hypot(dx, dy)
node = Node(sample_x[n_id], sample_y[n_id],
current.cost + d, c_id)
if n_id in closed_set:
continue
# Otherwise if it is already in the open set
if n_id in open_set:
if open_set[n_id].cost > node.cost:
open_set[n_id].cost = node.cost
open_set[n_id].parent_index = c_id
else:
open_set[n_id] = node
if path_found is False:
return [], []
# generate final course
rx, ry = [goal_node.x], [goal_node.y]
parent_index = goal_node.parent_index
while parent_index != -1:
n = closed_set[parent_index]
rx.append(n.x)
ry.append(n.y)
parent_index = n.parent_index
return rx, ry
def plot_road_map(road_map, sample_x, sample_y): # pragma: no cover
for i, _ in enumerate(road_map):
for ii in range(len(road_map[i])):
ind = road_map[i][ii]
plt.plot([sample_x[i], sample_x[ind]],
[sample_y[i], sample_y[ind]], "-k")
def sample_points(sx, sy, gx, gy, rr, ox, oy, obstacle_kd_tree):
max_x = max(ox)
max_y = max(oy)
min_x = min(ox)
min_y = min(oy)
sample_x, sample_y = [], []
while len(sample_x) <= N_SAMPLE:
tx = (random.random() * (max_x - min_x)) + min_x
ty = (random.random() * (max_y - min_y)) + min_y
dist, index = obstacle_kd_tree.query([tx, ty])
if dist >= rr:
sample_x.append(tx)
sample_y.append(ty)
sample_x.append(sx)
sample_y.append(sy)
sample_x.append(gx)
sample_y.append(gy)
return sample_x, sample_y
def main():
print(__file__ + " start!!")
# start and goal position
sx = 10.0 # [m]
sy = 10.0 # [m]
gx = 50.0 # [m]
gy = 50.0 # [m]
robot_size = 5.0 # [m]
ox = []
oy = []
for i in range(60):
ox.append(i)
oy.append(0.0)
for i in range(60):
ox.append(60.0)
oy.append(i)
for i in range(61):
ox.append(i)
oy.append(60.0)
for i in range(61):
ox.append(0.0)
oy.append(i)
for i in range(40):
ox.append(20.0)
oy.append(i)
for i in range(40):
ox.append(40.0)
oy.append(60.0 - i)
if show_animation:
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "^r")
plt.plot(gx, gy, "^c")
plt.grid(True)
plt.axis("equal")
rx, ry = prm_planning(sx, sy, gx, gy, ox, oy, robot_size)
    assert rx, 'Cannot find path'
if show_animation:
plt.plot(rx, ry, "-r")
plt.pause(0.001)
plt.show()
if __name__ == '__main__':
main()
| mit | -103,944,456,182,888,080 | 24.108844 | 108 | 0.523029 | false |
whaleygeek/osmc | recipes/fence.py | 1 | 8116 | # fence.py 17/03/2015 D.J.Whale
#
# a Minecraft wrapper, that validates all coordinates around a 3D fence.
# coordinates inside the fence will delegate to the real minecraft interface.
# coordinates outside will call a buzz() method and squash the call.
#
# This is for building electric fences around sandboxed regions.
import mcpi.minecraft as minecraft
from mcpi.vec3 import Vec3
def trace(msg):
print("fence." + str(msg))
class Fence():
def __init__(self, x1, y1, z1, x2, y2, z2):
self.x1 = x1
self.y1 = y1
self.z1 = z1
self.x2 = x2
self.y2 = y2
self.z2 = z2
def isPointInside(self, x, y, z):
if x >= self.x1 and x <= self.x2 \
and y >= self.y1 and y <= self.y2 \
and z >= self.z1 and z <= self.z2:
return True # inside
return False # outside
def isSpikeInside(self, x, z):
return self.isPointInside(x, self.y1, z)
def isRegionInside(self, x1, y1, z1, x2, y2, z2):
if x1 < self.x1 or x1 > self.x2:
return False
if x2 < self.x1 or x2 > self.x2:
return False
if y1 < self.y1 or y1 > self.y2:
return False
if y2 < self.y1 or y2 > self.y2:
return False
if z1 < self.z1 or z1 > self.z2:
return False
if z2 < self.z1 or z2 > self.z2:
return False
return True # must all be inside
    def checkPointOk(self, x, y, z):
        ok = self.isPointInside(x, y, z)
        if not ok:
            self.buzz()
        return ok
    def checkSpikeOk(self, x, z):
        ok = self.isSpikeInside(x, z)
        if not ok:
            self.buzz()
        return ok
    def checkRegionOk(self, x1, y1, z1, x2, y2, z2):
        ok = self.isRegionInside(x1, y1, z1, x2, y2, z2)
        if not ok:
            self.buzz()
        return ok
def buzz(self, culprit=None):
if culprit == None:
coords = (self.x1, self.y1, self.z1, self.x2, self.y2, self.z2)
print("outside of fenced region:" + str(coords))
else:
print(str(culprit) + " outside of fenced region")
#TODO optionally throw exception
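# Illustrative usage of Fence (a sketch, not part of the original module; the
# coordinates are made up): a fence spanning (0,0,0)..(10,10,10) accepts
# interior points and buzzes on exterior ones.
#   f = Fence(0, 0, 0, 10, 10, 10)
#   f.isPointInside(5, 5, 5)    # -> True
#   f.isPointInside(15, 5, 5)   # -> False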
class FencedMinecraft():
def __init__(self, fence, inner):
self.fence = fence
self.inner = inner
#self.camera = FencedCamera(fence, inner.camera)
#self.entity = FencedEntity(fence, inner.entity)
self.player = FencedPlayer(fence, inner.player)
self.events = FencedEvents(fence, inner.events)
@staticmethod
def create(addr, x1, y1, z1, x2, y2, z2, inner=None):
trace("create")
fence = Fence(x1, y1, z1, x2, y2, z2)
if inner == None:
# statically bound at this point to the imported mcpi.minecraft
inner = minecraft.Minecraft.create(addr)
return FencedMinecraft(fence, inner)
    def getBlock(self, *args):
        """Get block (x,y,z) => id:int"""
        trace("getBlock:" + str(args))
        x, y, z = args
        if self.fence.checkPointOk(x, y, z):
            return self.inner.getBlock(*args)
        else:
            return None
    def getBlockWithData(self, *args):
        """Get block with data (x,y,z) => Block"""
        trace("getBlockWithData:" + str(args))
        x, y, z = args
        if self.fence.checkPointOk(x, y, z):
            return self.inner.getBlockWithData(*args)
        else:
            return None
    def getBlocks(self, *args):
        """Get a cuboid of blocks (x0,y0,z0,x1,y1,z1) => [id:int]"""
        trace("getBlocks:" + str(args))
        x1, y1, z1, x2, y2, z2 = args
        if self.fence.checkRegionOk(x1, y1, z1, x2, y2, z2):
            return self.inner.getBlocks(*args)
        else:
            return None
    def setBlock(self, *args):
        """Set block (x,y,z,id,[data])"""
        trace("setBlock:" + str(args))
        x, y, z = args[:3]
        if self.fence.checkPointOk(x, y, z):
            self.inner.setBlock(*args)
    def setBlocks(self, *args):
        """Set a cuboid of blocks (x0,y0,z0,x1,y1,z1,id,[data])"""
        trace("setBlocks:" + str(args))
        x1, y1, z1, x2, y2, z2 = args[:6]
        if self.fence.checkRegionOk(x1, y1, z1, x2, y2, z2):
            self.inner.setBlocks(*args)
    def getHeight(self, *args):
        """Get the height of the world (x,z) => int"""
        trace("getHeight:" + str(args))
        x, z = args
        if self.fence.checkSpikeOk(x, z):
            return self.inner.getHeight(*args)
        else:
            return None
def getPlayerEntityIds(self):
"""Get the entity ids of the connected players => [id:int]"""
trace("getPlayerEntityIds")
return self.inner.getPlayerEntityIds()
#def saveCheckpoint(self):
# """Save a checkpoint that can be used for restoring the world"""
# pass
#def restoreCheckpoint(self):
# """Restore the world state to the checkpoint"""
# pass
def postToChat(self, msg):
"""Post a message to the game chat"""
trace("postToChat:" + str(msg))
self.inner.postToChat(msg)
#def setting(self, setting, status):
# """Set a world setting (setting, status). keys: world_immutable, nametags_visible"""
# pass
class FencedPlayer():
def __init__(self, fence, inner):
self.fence = fence
self.inner = inner
    def getPos(self):
        trace("player.getPos")
        return self.inner.getPos()
    def setPos(self, *args):
        trace("player.setPos:" + str(args))
        x, y, z = args
        if self.fence.checkPointOk(x, y, z):
            self.inner.setPos(*args)
    def getTilePos(self):
        trace("player.getTilePos")
        return self.inner.getTilePos()
    def setTilePos(self, *args):
        trace("setTilePos:" + str(args))
        x, y, z = args
        if self.fence.checkPointOk(x, y, z):
            self.inner.setTilePos(*args)
class FencedEvents:
def __init__(self, fence, inner):
self.fence = fence
self.inner = inner
def clearAll(self):
"""Clear all old events"""
trace("events.clearAll")
self.inner.clearAll()
def pollBlockHits(self):
"""Only triggered by sword => [BlockEvent]"""
trace("events.pollBlockHits")
#TODO work through list and filter out any out of range
return self.inner.pollBlockHits()
class FencedCamera:
def __init__(self, fence, inner):
self.fence = fence
self.inner = inner
    def setNormal(self, *args):
        """Set camera mode to normal Minecraft view ([entityId])"""
        self.inner.setNormal(*args)
    def setFixed(self):
        """Set camera mode to fixed view"""
        self.inner.setFixed()
    def setFollow(self, *args):
        """Set camera mode to follow an entity ([entityId])"""
        self.inner.setFollow(*args)
    def setPos(self, *args):
        """Set camera entity position (x,y,z)"""
        x, y, z = args
        if self.fence.checkPointOk(x, y, z):
            self.inner.setPos(*args)
# TEST HARNESS
def test():
"""Test that the fencing works"""
mc = FencedMinecraft.create("localhost", 10,10,10,20,20,20)
print("\n")
print("try:outside of fence")
mc.player.setTilePos(100, 100, 100) # should buzz
print("\n")
print("try:inside of fence")
mc.player.setTilePos(11, 11, 11) # should not buzz
print("\n")
print("try:outside of fence")
mc.setBlock(100, 100, 100, 1) # should buzz
print("\n")
print("try:inside of fence")
mc.setBlock(11, 11, 11, 1) # should not buzz
print("\n")
print("try:outside of fence")
mc.setBlocks(100, 100, 100, 110, 110, 100, 1) # should buzz
print("\n")
print("try:inside of fence")
    mc.setBlocks(10, 10, 10, 12, 12, 12, 1) # should not buzz
print("\n")
print("try:overlapping fence")
mc.setBlocks(8, 8, 8, 12, 12, 12, 1) # should buzz
print("\n")
# Only run the test code if this file is the main program
if __name__ == "__main__":
test()
# END
| mit | 4,879,673,079,171,629,000 | 25.697368 | 93 | 0.559142 | false |
coteyr/home-assistant | tests/components/test_mqtt.py | 2 | 10552 | """
tests.components.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests MQTT component.
"""
from collections import namedtuple
import unittest
from unittest import mock
import socket
import homeassistant.components.mqtt as mqtt
from homeassistant.const import (
EVENT_CALL_SERVICE, ATTR_DOMAIN, ATTR_SERVICE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestMQTT(unittest.TestCase):
""" Test the MQTT module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant(1)
mock_mqtt_component(self.hass)
self.calls = []
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def record_calls(self, *args):
self.calls.append(args)
def test_client_starts_on_home_assistant_start(self):
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.start.called)
    def test_client_stops_on_home_assistant_stop(self):
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.stop.called)
def test_setup_fails_if_no_broker_config(self):
self.assertFalse(mqtt.setup(self.hass, {mqtt.DOMAIN: {}}))
def test_setup_fails_if_no_connect_broker(self):
with mock.patch('homeassistant.components.mqtt.MQTT',
side_effect=socket.error()):
self.assertFalse(mqtt.setup(self.hass, {mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'test-broker',
}}))
def test_publish_calls_service(self):
self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)
mqtt.publish(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(
'test-topic',
self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC])
self.assertEqual(
'test-payload',
self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD])
def test_service_call_without_topic_does_not_publish(self):
self.hass.bus.fire(EVENT_CALL_SERVICE, {
ATTR_DOMAIN: mqtt.DOMAIN,
ATTR_SERVICE: mqtt.SERVICE_PUBLISH
})
self.hass.pool.block_till_done()
self.assertTrue(not mqtt.MQTT_CLIENT.publish.called)
def test_service_call_with_template_payload_renders_template(self):
"""
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], "2")
def test_service_call_with_payload_doesnt_render_template(self):
"""
If a 'payload' is provided then use that instead of 'payload_template'.
"""
payload = "not a template"
payload_template = "a template"
# Call the service directly because the helper functions don't allow
# you to provide payload AND payload_template.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template
}, blocking=True)
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], payload)
def test_service_call_without_payload_or_payload_template(self):
"""
        If neither 'payload' nor 'payload_template' is provided then fail.
"""
# Call the service directly because the helper functions require you to
# provide a payload.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic"
}, blocking=True)
self.assertFalse(mqtt.MQTT_CLIENT.publish.called)
def test_subscribe_topic(self):
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_not_match(self):
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
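    # The tests below exercise MQTT wildcard subscriptions: '+' matches
    # exactly one topic level, while '#' matches the whole remaining subtree
    # (including the parent topic itself).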
def test_subscribe_topic_level_wildcard(self):
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_level_wildcard_no_subtree_match(self):
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_root_topic(self):
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_no_match(self):
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
class TestMQTTCallbacks(unittest.TestCase):
""" Test the MQTT callbacks. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant(1)
# mock_mqtt_component(self.hass)
with mock.patch('paho.mqtt.client.Client'):
mqtt.setup(self.hass, {
mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'mock-broker',
}
})
self.hass.config.components.append(mqtt.DOMAIN)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_receiving_mqtt_message_fires_hass_event(self):
calls = []
def record(event):
calls.append(event)
self.hass.bus.listen_once(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, record)
MQTTMessage = namedtuple('MQTTMessage', ['topic', 'qos', 'payload'])
message = MQTTMessage('test_topic', 1, 'Hello World!'.encode('utf-8'))
mqtt.MQTT_CLIENT._mqtt_on_message(None, {'hass': self.hass}, message)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
last_event = calls[0]
self.assertEqual('Hello World!', last_event.data['payload'])
self.assertEqual(message.topic, last_event.data['topic'])
self.assertEqual(message.qos, last_event.data['qos'])
def test_mqtt_failed_connection_results_in_disconnect(self):
for result_code in range(1, 6):
mqtt.MQTT_CLIENT._mqttc = mock.MagicMock()
mqtt.MQTT_CLIENT._mqtt_on_connect(None, {'topics': {}}, 0,
result_code)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
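    # The CONNACK result codes covered by range(1, 6) are, per the MQTT 3.1.1
    # spec: 1 unacceptable protocol version, 2 identifier rejected, 3 server
    # unavailable, 4 bad user name or password, 5 not authorized. Any
    # non-zero code means the broker refused the connection, hence the
    # expected disconnect.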
def test_mqtt_subscribes_topics_on_connect(self):
from collections import OrderedDict
prev_topics = OrderedDict()
prev_topics['topic/test'] = 1,
prev_topics['home/sensor'] = 2,
prev_topics['still/pending'] = None
mqtt.MQTT_CLIENT.topics = prev_topics
mqtt.MQTT_CLIENT.progress = {1: 'still/pending'}
# Return values for subscribe calls (rc, mid)
mqtt.MQTT_CLIENT._mqttc.subscribe.side_effect = ((0, 2), (0, 3))
mqtt.MQTT_CLIENT._mqtt_on_connect(None, None, 0, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
expected = [(topic, qos) for topic, qos in prev_topics.items()
if qos is not None]
self.assertEqual(
expected,
[call[1] for call in mqtt.MQTT_CLIENT._mqttc.subscribe.mock_calls])
self.assertEqual({
1: 'still/pending',
2: 'topic/test',
3: 'home/sensor',
}, mqtt.MQTT_CLIENT.progress)
def test_mqtt_disconnect_tries_no_reconnect_on_stop(self):
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
@mock.patch('homeassistant.components.mqtt.time.sleep')
def test_mqtt_disconnect_tries_reconnect(self, mock_sleep):
mqtt.MQTT_CLIENT.topics = {
'test/topic': 1,
'test/progress': None
}
mqtt.MQTT_CLIENT.progress = {
1: 'test/progress'
}
mqtt.MQTT_CLIENT._mqttc.reconnect.side_effect = [1, 1, 1, 0]
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 1)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
self.assertEqual(4, len(mqtt.MQTT_CLIENT._mqttc.reconnect.mock_calls))
self.assertEqual([1, 2, 4],
[call[1][0] for call in mock_sleep.mock_calls])
self.assertEqual({'test/topic': 1}, mqtt.MQTT_CLIENT.topics)
self.assertEqual({}, mqtt.MQTT_CLIENT.progress)
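    # The call pattern asserted above implies a reconnect loop with doubling
    # back-off, roughly (sketch; the real component also caps the wait time,
    # and the cap shown here is an assumption):
    #
    #   wait_time = 0
    #   while self._mqttc.reconnect() != 0:
    #       wait_time = min(2 * wait_time, 300) if wait_time else 1
    #       time.sleep(wait_time)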
| mit | 8,665,669,149,904,865,000 | 37.510949 | 79 | 0.625379 | false |
google-research/falken | service/learner/brains/saved_model_to_tflite_model_test.py | 1 | 13046 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test conversion from TensorFlow SavedModel to TFLite model."""
import collections
import tempfile
import typing
from absl.testing import absltest
from learner import test_data
from learner.brains import networks
from learner.brains import saved_model_to_tflite_model
from learner.brains import tfa_specs
import tensorflow as tf
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.policies import policy_saver
from tf_agents.trajectories import time_step as ts
# pylint: disable=g-bad-import-order
import common.generate_flatbuffers # pylint: disable=unused-import
from tflite import Model
from tflite import Tensor
from tflite import TensorType
class MockTensor(tf.Tensor):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
MockTensorSpec = collections.namedtuple('MockTensorSpec', ['name'])
MockFunction = collections.namedtuple(
'MockFunction', ['inputs', 'structured_input_signature',
'outputs', 'structured_outputs'])
class SavedModelToTFLiteModelTest(absltest.TestCase):
"""Test saved_model_to_tflite_model."""
def save_model(self) -> str:
"""Create and save a model.
Returns:
Path to the directory containing the saved model.
"""
    # mkdtemp() creates a directory that outlives this call;
    # TemporaryDirectory() would schedule it for deletion as soon as the
    # wrapper object is garbage collected.
    saved_model_path = tempfile.mkdtemp()
brain_spec = tfa_specs.BrainSpec(test_data.brain_spec())
agent = behavioral_cloning_agent.BehavioralCloningAgent(
ts.time_step_spec(brain_spec.observation_spec.tfa_spec),
brain_spec.action_spec.tfa_spec,
cloning_network=networks.FalkenNetwork(
brain_spec, {
'dropout': None,
'fc_layers': [32],
'feelers_version': 'v1'
}),
optimizer=None,
loss_fn=lambda *unused_args: None)
agent.initialize()
_ = agent.policy.variables()
policy_saver.PolicySaver(agent.policy, batch_size=1).save(saved_model_path)
return saved_model_path
def test_rename_tflite_tensors(self):
"""Test patching TF Lite FlatBuffer Tensors with a new names."""
tensor0 = Tensor.TensorT()
tensor0.name = 'foo_bar_0'.encode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
tensor1 = Tensor.TensorT()
tensor1.name = 'bar_0_baz'.encode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
tensor2 = Tensor.TensorT()
tensor2.name = 'bar_1_baz'.encode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
saved_model_to_tflite_model._rename_tflite_tensors(
[tensor0, tensor1, tensor2], [0, 2],
{'foo_bar_0': '0/foo/bar/0',
'bar_1_baz': '0/bar/1/baz'})
self.assertEqual(tensor0.name.decode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), '0/foo/bar/0')
self.assertEqual(tensor1.name.decode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), 'bar_0_baz')
self.assertEqual(tensor2.name.decode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), '0/bar/1/baz')
def test_tf_tensor_name_to_tflite_name(self):
"""Test converting TF tensor names to TF lite tensor names."""
self.assertEqual(
saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('foo_bar:0'),
'foo_bar')
self.assertEqual(
saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('bar_baz:1'),
'bar_baz')
self.assertEqual(
saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('a_tensor'),
'a_tensor')
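  # The expectations above amount to stripping the trailing ':<index>' that
  # TF appends to tensor names (sketch, assumed equivalent to the helper
  # under test):
  #
  #   def _tf_tensor_name_to_tflite_name(name):
  #     return name.split(':')[0]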
def test_tf_tensor_spec_name_to_tensor_name(self):
"""Test converting TF tensor spec names to tensor argument names."""
self.assertEqual(
saved_model_to_tflite_model._tf_tensor_spec_name_to_tensor_name(
'0/foo/Bar/1/bazNumber'), 'foo_bar_1_baznumber')
self.assertEqual(
saved_model_to_tflite_model._tf_tensor_spec_name_to_tensor_name(
'magic/Stuff'), 'magic_stuff')
def test_create_tflite_to_tf_tensor_name_map(self):
"""Test creating a map of TF Lite to TF tensor spec name."""
input_map, output_map = (
saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
MockFunction(
inputs=[MockTensor('foo_0_bar:0'),
MockTensor('bish_0_bosh:0')],
structured_input_signature=(
[MockTensorSpec('0/foo/0/bar')],
{'0/bish/0/bosh': MockTensorSpec('0/bish/0/bosh')}
),
outputs=[MockTensor('identity_0:0'),
MockTensor('random_1:0')],
structured_outputs={
'action': {'turn_key': MockTensor('action/turn_key'),
'open_door': MockTensor('action/open_door')}})))
self.assertEqual(input_map, {'foo_0_bar': '0/foo/0/bar',
'bish_0_bosh': '0/bish/0/bosh'})
self.assertEqual(output_map, {'identity_0': 'action/open_door',
'random_1': 'action/turn_key'})
def test_create_tflite_to_tf_tensor_name_map_broken_function(self):
"""Fail with mismatched tensor spec to tensor name."""
with self.assertRaises(AssertionError):
saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
MockFunction(
inputs=[MockTensor('foo_0_bar'),
MockTensor('bish_0_bosh:0')],
structured_input_signature=(
[MockTensorSpec('0/foo/0/bar')],
{'0/bish/0/bosh': MockTensorSpec('0/bish/0/bosh')}
),
outputs=[], structured_outputs=[]))
def test_convert_saved_model(self):
"""Convert a saved model to TF Lite model."""
# Convert to a TFLite FlatBuffer.
tflite_flatbuffer = saved_model_to_tflite_model.convert(
self.save_model(), ['action'])
model = Model.ModelT.InitFromObj(
Model.Model.GetRootAsModel(tflite_flatbuffer, 0))
self.assertLen(model.subgraphs, 1)
subgraph = model.subgraphs[0]
inputs_and_outputs = []
for i in list(subgraph.inputs) + list(subgraph.outputs):
tensor = subgraph.tensors[i]
shape = tensor.shapeSignature if tensor.shapeSignature else tensor.shape
inputs_and_outputs.append((
tensor.name.decode(
saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING),
tensor.type, repr([d for d in shape])))
self.assertCountEqual(
inputs_and_outputs,
[('0/discount',
TensorType.TensorType.FLOAT32, '[1]'),
('0/observation/global_entities/0/position',
TensorType.TensorType.FLOAT32, '[1, 3]'),
('0/observation/global_entities/0/rotation',
TensorType.TensorType.FLOAT32, '[1, 4]'),
('0/observation/global_entities/1/position',
TensorType.TensorType.FLOAT32, '[1, 3]'),
('0/observation/global_entities/1/rotation',
TensorType.TensorType.FLOAT32, '[1, 4]'),
('0/observation/global_entities/2/drink',
TensorType.TensorType.INT32, '[1, 1]'),
('0/observation/global_entities/2/evilness',
TensorType.TensorType.FLOAT32, '[1, 1]'),
('0/observation/global_entities/2/position',
TensorType.TensorType.FLOAT32, '[1, 3]'),
('0/observation/global_entities/2/rotation',
TensorType.TensorType.FLOAT32, '[1, 4]'),
('0/observation/player/health',
TensorType.TensorType.FLOAT32, '[1, 1]'),
('0/observation/player/position',
TensorType.TensorType.FLOAT32, '[1, 3]'),
('0/observation/player/rotation',
TensorType.TensorType.FLOAT32, '[1, 4]'),
('0/observation/player/feeler',
TensorType.TensorType.FLOAT32, '[1, 3, 2]'),
('0/observation/camera/position',
TensorType.TensorType.FLOAT32, '[1, 3]'),
('0/observation/camera/rotation',
TensorType.TensorType.FLOAT32, '[1, 4]'),
('0/reward',
TensorType.TensorType.FLOAT32, '[1]'),
('0/step_type',
TensorType.TensorType.INT32, '[1]'),
('action/switch_weapon',
TensorType.TensorType.INT32, '[1, 1]'),
('action/throttle',
TensorType.TensorType.FLOAT32, '[1, 1]'),
('action/joy_pitch_yaw',
TensorType.TensorType.FLOAT32, '[1, 2]'),
('action/joy_xz',
TensorType.TensorType.FLOAT32, '[1, 2]'),
('action/joy_xz_world',
TensorType.TensorType.FLOAT32, '[1, 2]'),
])
def test_verify_function_output_order(self):
"""Verify the outputs of a tf.function.ConcreteFunction are sorted."""
# Outputs of tf_agents policies are dictionaries with each key indicating
# the name of the output (action) spec for the agent.
# http://cs/piper///depot/google3/third_party/py/tf_agents/policies/\
# policy_saver.py;l=603;rcl=314434347
# This dictionary (which is unsorted) is flattened and sorted when
# serialized by tf.function.ConcreteFunction._build_call_outputs().
# Expected input mapping from TFLite to TF inputs of the Lout module.
expected_input_map = {'player_drink_booze': 'player/drink/booze',
'player_drink_bubbles': 'player/drink/bubbles',
'player_drink_water': 'player/drink/water'}
# Test with all 2^N combinations of the following outputs.
outputs = ['action/player/chestpuff', 'action/player/bowl',
'action/player/stride', 'action/player/bluff',
'action/player/swing']
combinations = 2 ** len(outputs)
for combination in range(1, combinations):
selected = set()
offset = 0
bits = combination
while bits:
if bits & 1:
selected.add(outputs[offset])
offset += 1
bits >>= 1
class Lout(tf.Module):
"""Test module that provides a signature to serialize."""
def __init__(self, output_names: typing.Iterable[str], **kwargs):
"""Initialize the module to generate the specified set of outputs.
Args:
output_names: Set of outputs to generate.
**kwargs: Passed to tf.Module's initializer.
"""
self._output_names = tf.constant(list(output_names))
super(Lout, self).__init__(**kwargs)
@tf.function(input_signature=[
tf.TensorSpec((1,), dtype=tf.float32, name='player/drink/booze'),
tf.TensorSpec((1,), dtype=tf.float32, name='player/drink/water'),
tf.TensorSpec((1,), dtype=tf.int32, name='player/drink/bubbles'),
])
def __call__(self, ethanol, h2o,
carbonation) -> typing.Iterable[tf.Tensor]:
"""Generate a set of outputs.
The values of the outputs are _not_ used by the test case.
Args:
ethanol: Increases value of outputs.
h2o: Dilutes value of outputs.
carbonation: Modulates value of outputs.
Returns:
Dictionary of outputs whose names match _output_names.
"""
output_tensors = {}
for index, output in enumerate(self._output_names.numpy()):
output_tensors[output.decode()] = tf.identity(
(ethanol - h2o) * float(index % carbonation), name=output)
return output_tensors
      # Call the function at least once so a concrete function is traced.
lout = Lout(selected, name=f'Lout{combination}')
_ = lout(tf.constant([0.2]), tf.constant([0.5]), tf.constant([5]))
with tempfile.TemporaryDirectory() as temp_name:
# Save and load the model to retrieve the graph signature.
tf.saved_model.save(lout, temp_name)
tf.keras.backend.clear_session()
model = tf.saved_model.load(temp_name)
graph = model.signatures['serving_default']
input_map, output_map = (
saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
graph))
# Output tensors should be sorted by tensor name.
expected_output_map = {}
for index, output in enumerate(sorted(selected)):
expected_output_map[f'Identity_{index}'
if index > 0 else 'Identity'] = output
self.assertCountEqual(input_map, expected_input_map)
self.assertCountEqual(output_map, expected_output_map)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -3,998,864,741,181,076,000 | 40.415873 | 80 | 0.620803 | false |
oshtaier/robottelo | tests/foreman/cli/test_gpgkey.py | 1 | 46636 | # -*- encoding: utf-8 -*-
# vim: ts=4 sw=4 expandtab ai
# pylint: disable=R0904
"""Test class for GPG Key CLI"""
from ddt import ddt
from fauxfactory import gen_string, gen_alphanumeric, gen_integer
from robottelo.cli.factory import CLIFactoryError, make_gpg_key, make_org
from robottelo.cli.gpgkey import GPGKey
from robottelo.cli.org import Org
from robottelo.common import ssh
from robottelo.common.constants import VALID_GPG_KEY_FILE
from robottelo.common.decorators import (
data, run_only_on, skip_if_bug_open, stubbed)
from robottelo.common.helpers import get_data_file
from tempfile import mkstemp
from robottelo.test import CLITestCase
VALID_GPG_KEY_FILE_PATH = get_data_file(VALID_GPG_KEY_FILE)
def positive_create_data():
"""Random data for positive creation"""
return (
{'name': gen_string("latin1", 10)},
{'name': gen_string("utf8", 10)},
{'name': gen_string("alpha", 10)},
{'name': gen_string("alphanumeric", 10)},
{'name': gen_string("numeric", 20)},
{'name': gen_string("html", 10)},
)
def negative_create_data():
"""Random data for negative creation"""
return (
{'name': ' '},
{'name': gen_string('alpha', 300)},
{'name': gen_string('numeric', 300)},
{'name': gen_string('alphanumeric', 300)},
{'name': gen_string('utf8', 300)},
{'name': gen_string('latin1', 300)},
{'name': gen_string('html', 300)},
)
@run_only_on('sat')
@ddt
class TestGPGKey(CLITestCase):
"""Tests for GPG Keys via Hammer CLI"""
search_key = 'name'
@classmethod
def setUpClass(cls): # noqa
"""Create a shared organization for all tests to avoid generating
hundreds of organizations
"""
CLITestCase.setUpClass()
cls.org = make_org(cached=True)
def create_gpg_key_file(self, content=None):
"""Creates a fake GPG Key file and returns its path or None if an error
happens.
"""
(file_handle, key_filename) = mkstemp(text=True)
if not content:
content = gen_alphanumeric(gen_integer(20, 50))
with open(key_filename, "w") as gpg_key_file:
gpg_key_file.write(content)
return key_filename
return None
# Bug verification
@skip_if_bug_open('redmine', 4271)
def test_redmine_4271(self):
"""@Test: cvs output for gpg subcommand doesn\'t work
@Feature: GPG Keys
@Assert: cvs output for gpg info works
@BZ: Redmine#4271
"""
# GPG Key data
data = {'name': gen_string("alpha", 10)}
data['organization-id'] = self.org['id']
# Setup a new key file
data['key'] = VALID_GPG_KEY_FILE_PATH
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().info(
{'id': new_obj['id']}
)
self.assertEqual(result.return_code, 0,
"Failed to get object information")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
new_obj[self.search_key], result.stdout[self.search_key])
@skip_if_bug_open('redmine', 4272)
def test_redmine_4272(self):
"""@Test: gpg info should display key content
@Feature: GPG Keys
@Assert: gpg info should display key content
@BZ: Redmine#4272
"""
# GPG Key data
data = {'name': gen_string("alpha", 10)}
data['organization-id'] = self.org['id']
# Setup a new key file
content = gen_alphanumeric()
gpg_key = self.create_gpg_key_file(content=content)
self.assertIsNotNone(gpg_key, 'GPG Key file must be created')
data['key'] = gpg_key
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().info(
{'id': new_obj['id']}
)
self.assertEqual(result.return_code, 0,
"Failed to get object information")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
result.stdout['content'], content)
@skip_if_bug_open('bugzilla', 1108227)
def test_bugzilla_1108227(self):
"""@Test: Hammer fails to get a gpg info by name
@Feature: GPG Keys
@Assert: can get gpg key info by name
@BZ: 1108227
"""
# GPG Key data
data = {'name': gen_string("alpha", 10)}
data['organization-id'] = self.org['id']
# Setup a new key file
data['key'] = VALID_GPG_KEY_FILE_PATH
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().info({
'name': new_obj['name'],
'organization-id': self.org['id'],
})
self.assertEqual(result.return_code, 0, "Failed to create object")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
new_obj[self.search_key], result.stdout[self.search_key])
# Positive Create
@skip_if_bug_open('bugzilla', 1172009)
@data(*positive_create_data())
def test_positive_create_1(self, data):
"""@test: Create gpg key with valid name and valid gpg key via file
import using the default created organization
@feature: GPG Keys
@assert: gpg key is created
@BZ: 1172009
"""
result = Org.list()
self.assertGreater(len(result.stdout), 0, 'No organization found')
org = result.stdout[0]
# Setup data to pass to the factory
data = data.copy()
data['key'] = VALID_GPG_KEY_FILE_PATH
data['organization-id'] = org['id']
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().exists(
{'organization-id': org['id']},
(self.search_key, new_obj[self.search_key])
)
self.assertEqual(result.return_code, 0, "Failed to create object")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
new_obj[self.search_key], result.stdout[self.search_key])
@skip_if_bug_open('bugzilla', 1172009)
@data(*positive_create_data())
def test_positive_create_2(self, data):
"""@test: Create gpg key with valid name and valid gpg key via file
        import using a new organization
@feature: GPG Keys
@assert: gpg key is created
@BZ: 1172009
"""
# Setup data to pass to the factory
data = data.copy()
data['key'] = VALID_GPG_KEY_FILE_PATH
data['organization-id'] = self.org['id']
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().exists(
{'organization-id': self.org['id']},
(self.search_key, new_obj[self.search_key])
)
self.assertEqual(result.return_code, 0, "Failed to create object")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
new_obj[self.search_key], result.stdout[self.search_key])
# Negative Create
@skip_if_bug_open('bugzilla', 1172009)
@data(*positive_create_data())
def test_negative_create_1(self, data):
"""@test: Create gpg key with valid name and valid gpg key via file
        import then try to create a new one with the same name
@feature: GPG Keys
@assert: gpg key is not created
@BZ: 1172009
"""
# Setup data to pass to the factory
data = data.copy()
data['organization-id'] = self.org['id']
try:
new_obj = make_gpg_key(data)
except CLIFactoryError as err:
self.fail(err)
# Can we find the new object?
result = GPGKey().exists(
{'organization-id': self.org['id']},
(self.search_key, new_obj[self.search_key])
)
self.assertEqual(result.return_code, 0, "Failed to create object")
self.assertEqual(
len(result.stderr), 0, "There should not be an exception here")
self.assertEqual(
new_obj[self.search_key], result.stdout[self.search_key])
# Setup a new key file
data['key'] = '/tmp/%s' % gen_alphanumeric()
gpg_key = self.create_gpg_key_file()
self.assertIsNotNone(gpg_key, 'GPG Key file must be created')
ssh.upload_file(local_file=gpg_key, remote_file=data['key'])
# Try to create a gpg key with the same name
new_obj = GPGKey().create(data)
self.assertNotEqual(
new_obj.return_code, 0, "Object should not be created")
self.assertGreater(
len(new_obj.stderr), 0, "Should have raised an exception")
@data(*positive_create_data())
def test_negative_create_2(self, data):
"""@test: Create gpg key with valid name and no gpg key
@feature: GPG Keys
@assert: gpg key is not created
"""
# Setup data to pass to create
data = data.copy()
data['organization-id'] = self.org['id']
# Try to create a new object passing @data to factory method
new_obj = GPGKey().create(data)
self.assertNotEqual(
new_obj.return_code, 0, "Object should not be created")
self.assertGreater(
len(new_obj.stderr), 0, "Should have raised an exception")
@data(*negative_create_data())
def test_negative_create_3(self, data):
"""@test: Create gpg key with invalid name and valid gpg key via
file import
@feature: GPG Keys
@assert: gpg key is not created
"""
# Setup data to pass to create
data = data.copy()
data['key'] = '/tmp/%s' % gen_alphanumeric()
data['organization-id'] = self.org['id']
ssh.upload_file(
local_file=VALID_GPG_KEY_FILE_PATH, remote_file=data['key'])
# Try to create a new object passing @data to factory method
new_obj = GPGKey().create(data)
self.assertNotEqual(
new_obj.return_code, 0, "Object should not be created")
self.assertGreater(
len(new_obj.stderr), 0, "Should have raised an exception")
# Positive Delete
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_positive_delete_1(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then delete it
@feature: GPG Keys
@assert: gpg key is deleted
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key text is valid text from a valid gpg key file
"""
@stubbed()
def test_positive_delete_2(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then delete it
@feature: GPG Keys
@assert: gpg key is deleted
@status: manual
"""
pass
# Negative Delete
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
delete using a negative gpg key ID
delete using a random string as the gpg key ID
"""
@stubbed()
def test_negative_delete_1(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then fail to delete it
@feature: GPG Keys
@assert: gpg key is not deleted
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key text is valid text from a valid gpg key file
delete using a negative gpg key ID
delete using a random string as the gpg key ID
"""
@stubbed()
def test_negative_delete_2(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then fail to delete it
@feature: GPG Keys
@assert: gpg key is not deleted
@status: manual
"""
pass
# Positive Update
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_positive_update_1(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then update its name
@feature: GPG Keys
@assert: gpg key is updated
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_positive_update_2(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then update its gpg key file
@feature: GPG Keys
@assert: gpg key is updated
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key text is valid text from a valid gpg key file
"""
@stubbed()
def test_positive_update_3(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then update its name
@feature: GPG Keys
@assert: gpg key is updated
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key text is valid text from a valid gpg key file
"""
@stubbed()
def test_positive_update_4(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then update its gpg key text
@feature: GPG Keys
@assert: gpg key is updated
@status: manual
"""
pass
# Negative Update
"""DATADRIVENGOESHERE
update name is blank
update name is alpha 300 characters long
update name is numeric 300 characters long
update name is alphanumeric 300 characters long
update name is utf-8 300 characters long
update name is latin1 300 characters long
update name is html 300 characters long
gpg key file is valid always
"""
@stubbed()
def test_negative_update_1(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then fail to update its name
@feature: GPG Keys
@assert: gpg key is not updated
@status: manual
"""
pass
"""DATADRIVENGOESHERE
update name is blank
update name is alpha 300 characters long
update name is numeric 300 characters long
update name is alphanumeric 300 characters long
update name is utf-8 300 characters long
update name is latin1 300 characters long
update name is html 300 characters long
gpg key text is valid text from a valid gpg key file
"""
@stubbed()
def test_negative_update_2(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then fail to update its name
@feature: GPG Keys
@assert: gpg key is not updated
@status: manual
"""
pass
# Product association
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_1(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product
@feature: GPG Keys
@assert: gpg key is associated with product
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_2(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
@feature: GPG Keys
@assert: gpg key is associated with product but not the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_3(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_4(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product using Repo discovery
method
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_5(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository
@feature: GPG Keys
@assert: gpg key is associated with product and the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_6(self):
"""@test: Create gpg key via file import and associate with custom repo
GPGKey should contain valid name and valid key and should be associated
to one repository from custom product. Make sure custom product should
have more than one repository.
@feature: GPG Keys
@assert: gpg key is associated with the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_7(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repos from custom product using Repo
discovery method
@feature: GPG Keys
@assert: gpg key is associated with product and all the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_8(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product then
update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_9(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update but
not the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_10(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update but
not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_11(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product using Repo discovery
method then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update but
not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_12(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and repository
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_13(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
more than one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_14(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repos from custom product using Repo
discovery method then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and all repositories
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_15(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with empty (no repos) custom product
then delete it
@feature: GPG Keys
@assert: gpg key is associated with product during creation but removed
from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_16(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has one repository
then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repository
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_17(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product that has more than one
repository then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_18(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it with custom product using Repo discovery
method then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_19(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
one repository then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
during creation but removed from product and repository after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_20(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repository from custom product that has
more than one repository then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
during creation but removed from product and repository after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_21(self):
"""@test: Create gpg key with valid name and valid gpg key via file
import then associate it to repos from custom product using Repo
discovery method then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and all repositories
during creation but removed from product and all repositories after
deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_22(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with empty (no repos)
custom product
@feature: GPG Keys
@assert: gpg key is associated with product
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_23(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
one repository
@feature: GPG Keys
@assert: gpg key is associated with product but not the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_24(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
more than one repository
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_25(self):
"""@test: Create gpg key with valid name and valid gpg key via text via
cut and paste/string then associate it with custom product using
Repo discovery method
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_26(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has one repository
@feature: GPG Keys
@assert: gpg key is associated with product and the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_27(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has more than one repository
@feature: GPG Keys
@assert: gpg key is associated with product and one of the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_28(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repos from custom product
using Repo discovery method
@feature: GPG Keys
@assert: gpg key is associated with product and all the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_29(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with empty (no repos)
custom product then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_30(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update
but not the repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_31(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
more than one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update
but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_32(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product using
Repo discovery method then update the key
@feature: GPG Keys
@assert: gpg key is associated with product before/after update
but not the repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_33(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and repository
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_34(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has more than one repository then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_35(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repos from custom product
using Repo discovery method then update the key
@feature: GPG Keys
@assert: gpg key is associated with product and all repositories
before/after update
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_36(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with empty (no repos) custom
product then delete it
@feature: GPG Keys
@assert: gpg key is associated with product during creation but
removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_37(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
one repository then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repository
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_38(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product that has
more than one repository then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_39(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it with custom product using
Repo discovery method then delete it
@feature: GPG Keys
@assert: gpg key is associated with product but not the repositories
during creation but removed from product after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_40(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has one repository then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
during creation but removed from product and repository after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_41(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repository from custom
product that has more than one repository then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and single repository
during creation but removed from product and repository after deletion
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_key_associate_42(self):
"""@test: Create gpg key with valid name and valid gpg key text via
cut and paste/string then associate it to repos from custom product
using Repo discovery method then delete the key
@feature: GPG Keys
@assert: gpg key is associated with product and all repositories
during creation but removed from product and all repositories
after deletion
@status: manual
"""
pass
# Content
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_consume_content_1(self):
"""@test: Hosts can install packages using gpg key associated with
single custom repository
@feature: GPG Keys
@assert: host can install package from custom repository
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_consume_content_2(self):
"""@test: Hosts can install packages using gpg key associated with
multiple custom repositories
@feature: GPG Keys
@assert: host can install package from custom repositories
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_consume_content_3(self):
"""@test:Hosts can install packages using different gpg keys associated
with multiple custom repositories
@feature: GPG Keys
@assert: host can install package from custom repositories
@status: manual
"""
pass
    # Miscellaneous
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_list_key_1(self):
"""@test: Create gpg key and list it
@feature: GPG Keys
@assert: gpg key is displayed/listed
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_search_key_1(self):
"""@test: Create gpg key and search/find it
@feature: GPG Keys
@assert: gpg key can be found
@status: manual
"""
pass
"""DATADRIVENGOESHERE
name is alpha
name is numeric
name is alphanumeric
name is utf-8
name is latin1
name is html
gpg key file is valid always
"""
@stubbed()
def test_info_key_1(self):
"""@test: Create single gpg key and get its info
@feature: GPG Keys
@assert: specific information for gpg key matches the creation values
@status: manual
"""
pass
| gpl-3.0 | 647,027,988,911,771,600 | 24.318132 | 79 | 0.589094 | false |
schtibe/django-rest-framework-json-api | example/tests/integration/test_model_resource_name.py | 4 | 5946 | import pytest
from django.core.urlresolvers import reverse
from example.tests.utils import load_json
from example import models, serializers, views
pytestmark = pytest.mark.django_db
class _PatchedModel:
class JSONAPIMeta:
resource_name = "resource_name_from_JSONAPIMeta"
def _check_resource_and_relationship_comment_type_match(django_client):
entry_response = django_client.get(reverse("entry-list"))
comment_response = django_client.get(reverse("comment-list"))
comment_resource_type = load_json(comment_response.content).get('data')[0].get('type')
comment_relationship_type = load_json(entry_response.content).get(
'data')[0].get('relationships').get('comments').get('data')[0].get('type')
assert comment_resource_type == comment_relationship_type, "The resource type seen in the relationships and head resource do not match"
def _check_relationship_and_included_comment_type_are_the_same(django_client, url):
response = django_client.get(url + "?include=comments")
data = load_json(response.content).get('data')[0]
comment = load_json(response.content).get('included')[0]
comment_relationship_type = data.get('relationships').get('comments').get('data')[0].get('type')
comment_included_type = comment.get('type')
assert comment_relationship_type == comment_included_type, "The resource type seen in the relationships and included do not match"
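# For orientation, the two helpers above walk a JSON:API document shaped
# roughly like this (illustrative payload, not taken from an actual fixture):
#
#   {
#       "data": [{
#           "type": "entries",
#           "relationships": {
#               "comments": {"data": [{"type": "comments", "id": "1"}]}
#           }
#       }],
#       "included": [{"type": "comments", "id": "1", "attributes": {}}]
#   }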
@pytest.mark.usefixtures("single_entry")
class TestModelResourceName:
def test_model_resource_name_on_list(self, client):
models.Comment.__bases__ += (_PatchedModel,)
response = client.get(reverse("comment-list"))
data = load_json(response.content)['data'][0]
        # type should be 'resource_name_from_JSONAPIMeta' instead of the model name
assert (data.get('type') == 'resource_name_from_JSONAPIMeta'), (
'resource_name from model incorrect on list')
# Precedence tests
    def test_resource_name_precedence(self, client):
# default
response = client.get(reverse("comment-list"))
data = load_json(response.content)['data'][0]
        assert (data.get('type') == 'comments'), (
            'default resource_name incorrect on list')
# model > default
models.Comment.__bases__ += (_PatchedModel,)
response = client.get(reverse("comment-list"))
data = load_json(response.content)['data'][0]
assert (data.get('type') == 'resource_name_from_JSONAPIMeta'), (
'resource_name from model incorrect on list')
# serializer > model
serializers.CommentSerializer.Meta.resource_name = "resource_name_from_serializer"
response = client.get(reverse("comment-list"))
data = load_json(response.content)['data'][0]
assert (data.get('type') == 'resource_name_from_serializer'), (
'resource_name from serializer incorrect on list')
# view > serializer > model
views.CommentViewSet.resource_name = 'resource_name_from_view'
response = client.get(reverse("comment-list"))
data = load_json(response.content)['data'][0]
assert (data.get('type') == 'resource_name_from_view'), (
'resource_name from view incorrect on list')
def teardown_method(self, method):
models.Comment.__bases__ = (models.Comment.__bases__[0],)
try:
delattr(serializers.CommentSerializer.Meta, "resource_name")
except AttributeError:
pass
try:
delattr(views.CommentViewSet, "resource_name")
except AttributeError:
pass
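# The precedence asserted in test_resource_name_precedence above corresponds
# to a lookup of roughly this shape (sketch only; resolve_resource_name and
# pluralize are illustrative names, not part of the package API):
#
#   def resolve_resource_name(view, serializer, model):
#       return (getattr(view, 'resource_name', None)
#               or getattr(serializer.Meta, 'resource_name', None)
#               or getattr(getattr(model, 'JSONAPIMeta', None),
#                          'resource_name', None)
#               or pluralize(model.__name__.lower()))  # default: 'comments'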
@pytest.mark.usefixtures("single_entry")
class TestResourceNameConsistency:
# Included rename tests
def test_type_match_on_included_and_inline_base(self, client):
_check_relationship_and_included_comment_type_are_the_same(client, reverse("entry-list"))
def test_type_match_on_included_and_inline_with_JSONAPIMeta(self, client):
models.Comment.__bases__ += (_PatchedModel,)
_check_relationship_and_included_comment_type_are_the_same(client, reverse("entry-list"))
def test_type_match_on_included_and_inline_with_serializer_resource_name(self, client):
serializers.CommentSerializer.Meta.resource_name = "resource_name_from_serializer"
_check_relationship_and_included_comment_type_are_the_same(client, reverse("entry-list"))
def test_type_match_on_included_and_inline_with_serializer_resource_name_and_JSONAPIMeta(self, client):
models.Comment.__bases__ += (_PatchedModel,)
serializers.CommentSerializer.Meta.resource_name = "resource_name_from_serializer"
_check_relationship_and_included_comment_type_are_the_same(client, reverse("entry-list"))
# Relation rename tests
def test_resource_and_relationship_type_match(self, client):
_check_resource_and_relationship_comment_type_match(client)
def test_resource_and_relationship_type_match_with_serializer_resource_name(self, client):
serializers.CommentSerializer.Meta.resource_name = "resource_name_from_serializer"
_check_resource_and_relationship_comment_type_match(client)
def test_resource_and_relationship_type_match_with_JSONAPIMeta(self, client):
models.Comment.__bases__ += (_PatchedModel,)
_check_resource_and_relationship_comment_type_match(client)
def test_resource_and_relationship_type_match_with_serializer_resource_name_and_JSONAPIMeta(self, client):
models.Comment.__bases__ += (_PatchedModel,)
serializers.CommentSerializer.Meta.resource_name = "resource_name_from_serializer"
_check_resource_and_relationship_comment_type_match(client)
def teardown_method(self, method):
models.Comment.__bases__ = (models.Comment.__bases__[0],)
try:
delattr(serializers.CommentSerializer.Meta, "resource_name")
except AttributeError:
pass
| bsd-2-clause | -3,258,020,664,099,435,500 | 42.40146 | 139 | 0.685166 | false |
Gazer022/bitcoin | contrib/devtools/update-translations.py | 22 | 8083 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
Transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
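# Illustrative sketch (not part of the original script): the scanner simply
# grabs the single character following each '%', so for example
#   find_format_specifiers('%1 and %2, %d%%') -> ['1', '2', 'd', '%']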
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
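# Continuing the example above (illustrative only): because numeric (Qt)
# specifiers are present, the strprintf-style ones are discarded:
#   split_format_specifiers(['1', '2', 'd', '%']) -> ({'1', '2'}, [])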
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
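# Note (illustrative): FIX_RE keeps newline (\x0a) and carriage return (\x0d)
# but strips every other byte below 0x20, tabs included, e.g.
#   remove_invalid_characters(b'a\x00b\tc\nd') -> b'abc\nd'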
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
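# Illustrative effect: escape_cdata("it's \"ok\"") yields
# "it&apos;s &quot;ok&quot;" after the stock XML escaping has run first.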
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit | -3,276,723,175,113,380,400 | 37.860577 | 124 | 0.621923 | false |
kishikawakatsumi/Mozc-for-iOS | src/__init__.py | 1 | 1579 | # -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "mazda"
| apache-2.0 | 9,181,313,976,961,226,000 | 49.935484 | 72 | 0.773274 | false |
MalloyPower/python-compliance | analysis-code/feature-finder/find_features.py | 1 | 8880 | #!/usr/bin/python3
''' Run the front-end in this directory over the Qualitas suite.
Assume the front-end is a feature-counter, so it returns totals
per file; we accumulate these and print totals per application.
This is an old clone of the qualitas_test.py file used in multitest.
'''
from __future__ import print_function
import os
import sys
import subprocess
import shutil
import re
import linecache
import qualitas
# Metrics are (from scan.l):
# 0: print_as_function ? 1 : 0
# 1: set_count - set_comprehension,
# 2: set_comprehension,
# 3: dict_comprehension,
# 4: extra_func_parens,  <--- I'm deleting these.
# 5: with_as_count
EXTRA_FUNC_PARENS_POS = 4 # delete data for func parens
METRIC_KINDS = ['Print Fcn', 'Set Lit', 'Set Comp', 'Dict Comp', 'With As']
# This is where the Makefile lives:
FRONTEND_DIR = os.path.join(os.getcwd())
# This is the front-end that the Makefile builds:
FRONTEND_EXE = os.path.join(FRONTEND_DIR, 'run')
# I need to record errors and metrics for each processed file:
ERROR_LOG = os.path.join(os.getcwd(), 'error.log')
METRICS_LOG = os.path.join(os.getcwd(), 'metrics.log')
# sed trick to add a newline to the end of a file:
ADD_NEWLINE = 'sed -e \'$a\\\' '
class TestHarness:
def __init__(self, version, verbose=False):
self.verbose = verbose
self.init_counters()
self.make_executable(version)
@staticmethod
def delete_logs():
if os.path.isfile(ERROR_LOG): os.remove(ERROR_LOG)
if os.path.isfile(METRICS_LOG): os.remove(METRICS_LOG)
@staticmethod
def shell_run(cmd):
return subprocess.call(cmd, cwd=FRONTEND_DIR, shell=True)
def make_executable(self, ver, forceMake=False):
self.ver_front_end = '%s-%s' % (FRONTEND_EXE, ver)
if forceMake or not os.path.isfile(self.ver_front_end):
print('--- Building front-end for v%s' % ver, file=sys.stderr)
retcode = TestHarness.shell_run('make')
assert retcode == 0, '\t*** FAILED to make the parser'
os.rename(FRONTEND_EXE, self.ver_front_end)
def init_counters(self):
self.noPassed = 0
self.noFailed = 0
self.uses_counts = [0] * len(METRIC_KINDS)
self.file_counts = [0] * len(METRIC_KINDS)
self.files_using = 0 # No of files >0 for any (non-print-func) metric
def add_metrics(self, this_uses):
''' Add in the metric values from a single file'''
assert len(this_uses)==len(METRIC_KINDS), \
'inconsistent number of metrics %d' % len(this_uses)
# Did this file use any of the 3x features?
if sum(this_uses[1:]) > 0:
self.files_using += 1
# Now add usage counts in to the running totals:
self.uses_counts = [i+j for i,j in zip(self.uses_counts, this_uses)]
# Change usage counts to file counts:
this_file = [min(1,m) for m in this_uses] # All 0 or 1
self.file_counts = [i+j for i,j in zip(self.file_counts, this_file)]
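        # For example (illustrative): this_uses = [0, 2, 0, 1, 3] bumps
        # files_using (a 3.x feature was seen), adds the raw counts into
        # uses_counts, and adds [0, 1, 0, 1, 1] into file_counts.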
def get_file_metrics(self):
return list(self.file_counts)
def get_use_metrics(self):
return list(self.uses_counts)
def set_verbose(self):
self.verbose = True
@staticmethod
def count_tests(testpath):
'''Work out the total number of '.py' files'''
assert os.path.isdir(testpath), testpath + 'must be a directory'
count = 0
for _, _, files in os.walk(testpath):
pyFiles = [filename for filename in files
if filename.endswith('.py')]
count += len(pyFiles)
return count
def print_context(self, filename, line_no):
'''For syntax error, print some lines around the error line '''
for d in [line_no-1, line_no, line_no+1]: # one each side
print('%d:%s' % (d, linecache.getline(filename, d)),
file=sys.stderr, flush=True, end='')
print('')
def check_return_code(self, retcode, testcase):
if retcode > 0: # Syntax error
if self.verbose:
print('\n* ' + testcase+ ' failed.', file=sys.stderr)
with open(ERROR_LOG, 'r') as tmp_fh:
error_msg = tmp_fh.read()
match = re.match('^(\d+)', error_msg)
if match:
line_no = int(match.group(1))
print(error_msg, file=sys.stderr, flush=True, end='')
self.print_context(testcase, line_no)
self.noFailed += 1
else: # No error, so collect metrics:
with open(METRICS_LOG, 'r') as tmp_fh:
this_metrics = tmp_fh.read().split(':')
this_metrics = [int(m) for m in this_metrics]
del this_metrics[EXTRA_FUNC_PARENS_POS]
self.add_metrics(this_metrics)
self.noPassed += 1
self.delete_logs()
def test_one_file(self, root, filename):
testcase = os.path.join(root, filename)
toExec = ADD_NEWLINE + ' "%s" | %s >%s 2>%s' \
% (testcase, self.ver_front_end, METRICS_LOG, ERROR_LOG)
retcode = TestHarness.shell_run(toExec)
self.check_return_code(retcode, testcase)
def test_directory(self, testpath, reinit=False):
assert os.path.isdir(testpath), testpath + 'should be a directory'
if reinit:
self.init_counters()
for root, dirs, files in os.walk(testpath):
for filename in files:
if filename.endswith('.py'):
self.test_one_file(root, filename)
def get_total_files(self):
return (self.noPassed + self.noFailed)
def get_total_uses(self):
'''Sum of all uses except for print-as-func:'''
return sum(self.uses_counts[1:])
def total_files_using(self):
return self.files_using
def percent_files_using(self):
return self.total_files_using() * 100.0 / self.get_total_files()
    def __str__(self):
        total = self.get_total_files()
        percent = (self.noPassed * 100.0 / total) if total else 0.0
        return '%d Passed, %d Failed (%5.2f%% passed)' \
            % (self.noPassed, self.noFailed, percent)
def latex_table_row(data, effect=None, want_hline=False):
row_str = ' & '.join(['%5s' % str(d) for d in data]) + ' \\\\'
if want_hline:
row_str += '\n\\hline'
return row_str
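# Illustrative output (not from the original script):
#   latex_table_row(['app', 1, 2])
# gives '  app &     1 &     2' followed by the LaTeX row terminator '\\',
# each cell right-aligned to a width of five characters.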
def print_latex_table(testcases, metrics, kind, totals):
# First column of table should be the application names:
row_data = [['%12s' % t] for t in testcases]
print('%%%%% Counting {} %%%%%'.format(kind))
# Data columns are the metrics for each version:
for i, plist in enumerate(metrics):
row_data[i].extend(plist)
# Now print the table, row-by-row:
print('\\begin{tabular}{l*{%d}{c}c}' % len(metrics[0]))
print(latex_table_row(['Application'] + METRIC_KINDS + totals,'bf', True))
# Print rows, sorted descending by total uses
for row in sorted(row_data, key=lambda m: m[-2], reverse=True):
print(latex_table_row(row))
print('\\hline')
print('\\end{tabular}')
def test_all(pyVersions, testroot, testcases):
# We assemble the data column-by-column (one column per Python version)
use_metrics = [[] for t in testcases] # one row per testcase
file_metrics = [[] for t in testcases] # one row per testcase
for runver in pyVersions:
harness = TestHarness(runver, False)
print('Running front-end for v%s on %d apps:'
% (runver, len(testcases)),
file=sys.stderr, flush=True, end='')
for i,testdir in enumerate(testcases):
print(" %s," % testdir, file=sys.stderr, flush=True, end='')
harness.test_directory(os.path.join(testroot,testdir), True)
# Add in uses data for this app:
use_metrics[i].extend(harness.get_use_metrics())
use_metrics[i].append(harness.get_total_uses())
# Add in files data for this app:
file_metrics[i].extend(harness.get_file_metrics())
file_metrics[i].append(harness.total_files_using())
file_metrics[i].append(round(harness.percent_files_using()))
print(' done.', file=sys.stderr)
return (use_metrics, file_metrics)
''' Use the command-line args to specify application(s) '''
if __name__ == '__main__':
full_suite = qualitas.get_dirnames()
testcases = [arg for arg in sys.argv[1:] if arg in full_suite]
if testcases == []: # None specified, so test *all* the applications
testcases = full_suite
QROOT = qualitas.corpus_for_year('2017')
PYVER = ['2.7']
(use_metrics, file_metrics) = test_all(PYVER, QROOT, testcases)
print_latex_table(testcases, use_metrics, 'uses', ['Total Uses'])
print_latex_table(testcases, file_metrics, 'files', ['Total Files', 'Perc Files'])
| mit | -2,061,717,688,297,375,500 | 37.441558 | 86 | 0.596622 | false |
GeoCat/QGIS | python/plugins/db_manager/db_plugins/gpkg/plugin.py | 5 | 10587 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
# this will disable the dbplugin if the connector raise an ImportError
from .connector import GPKGDBConnector
from qgis.PyQt.QtCore import Qt, QFileInfo
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QApplication, QAction, QFileDialog
from qgis.core import QgsDataSourceUri, QgsSettings
from qgis.gui import QgsMessageBar
from ..plugin import DBPlugin, Database, Table, VectorTable, RasterTable, TableField, TableIndex, TableTrigger, \
InvalidDataException
from . import resources_rc
hasattr(resources_rc, 'foo')
def classFactory():
return GPKGDBPlugin
class GPKGDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QIcon(":/db_manager/gpkg/icon")
@classmethod
def typeName(self):
return 'gpkg'
@classmethod
def typeNameString(self):
return 'GeoPackage'
@classmethod
def providerName(self):
return 'ogr'
@classmethod
def connectionSettingsKey(self):
return 'providers/ogr/GPKG/connections'
def databasesFactory(self, connection, uri):
return GPKGDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
if not settings.contains("path"): # non-existent entry?
raise InvalidDataException(self.tr(u'There is no defined database connection "{0}".').format(conn_name))
database = settings.value("path")
uri = QgsDataSourceUri()
uri.setDatabase(database)
return self.connectToUri(uri)
@classmethod
def addConnection(self, conn_name, uri):
settings = QgsSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
settings.setValue("path", uri.database())
return True
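    # Illustrative usage (the path is hypothetical, not from QGIS itself):
    #   uri = QgsDataSourceUri()
    #   uri.setDatabase('/tmp/demo.gpkg')
    #   GPKGDBPlugin.addConnection('demo.gpkg', uri)
    # Only the file path is stored under the OGR/GPKG settings group.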
@classmethod
def addConnectionActionSlot(self, item, action, parent, index):
QApplication.restoreOverrideCursor()
try:
filename, selected_filter = QFileDialog.getOpenFileName(parent,
parent.tr("Choose GeoPackage file"), None, "GeoPackage (*.gpkg)")
if not filename:
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
conn_name = QFileInfo(filename).fileName()
uri = QgsDataSourceUri()
uri.setDatabase(filename)
self.addConnection(conn_name, uri)
index.internalPointer().itemChanged()
class GPKGDatabase(Database):
def __init__(self, connection, uri):
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return GPKGDBConnector(uri)
def dataTablesFactory(self, row, db, schema=None):
return GPKGTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return GPKGVectorTable(row, db, schema)
def rasterTablesFactory(self, row, db, schema=None):
return GPKGRasterTable(row, db, schema)
def info(self):
from .info_model import GPKGDatabaseInfo
return GPKGDatabaseInfo(self)
def sqlResultModel(self, sql, parent):
from .data_model import GPKGSqlResultModel
return GPKGSqlResultModel(self, sql, parent)
def registerDatabaseActions(self, mainWindow):
action = QAction(self.tr("Run &Vacuum"), self)
mainWindow.registerAction(action, self.tr("&Database"), self.runVacuumActionSlot)
Database.registerDatabaseActions(self, mainWindow)
def runVacuumActionSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, (DBPlugin, Table)) or item.database() is None:
parent.infoBar.pushMessage(self.tr("No database selected or you are not connected to it."),
QgsMessageBar.INFO, parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
self.runVacuum()
def runVacuum(self):
self.database().aboutToChange.emit()
self.database().connector.runVacuum()
self.database().refresh()
def runAction(self, action):
action = str(action)
if action.startswith("vacuum/"):
if action == "vacuum/run":
self.runVacuum()
return True
return Database.runAction(self, action)
def uniqueIdFunction(self):
return None
def toSqlLayer(self, sql, geomCol, uniqueCol, layerName="QueryLayer", layerType=None, avoidSelectById=False, filter=""):
from qgis.core import QgsVectorLayer
vl = QgsVectorLayer(self.uri().database(), layerName, 'ogr')
vl.setSubsetString(sql)
return vl
class GPKGTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, None)
self.name, self.isView, self.isSysTable = row
def ogrUri(self):
ogrUri = u"%s|layername=%s" % (self.uri().database(), self.name)
return ogrUri
def mimeUri(self):
# QGIS has no provider to load Geopackage vectors, let's use OGR
return u"vector:ogr:%s:%s" % (self.name, self.ogrUri())
def toMapLayer(self):
from qgis.core import QgsVectorLayer
provider = "ogr"
uri = self.ogrUri()
return QgsVectorLayer(uri, self.name, provider)
def tableFieldsFactory(self, row, table):
return GPKGTableField(row, table)
def tableIndexesFactory(self, row, table):
return GPKGTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return GPKGTableTrigger(row, table)
def tableDataModel(self, parent):
from .data_model import GPKGTableDataModel
return GPKGTableDataModel(self, parent)
class GPKGVectorTable(GPKGTable, VectorTable):
def __init__(self, row, db, schema=None):
GPKGTable.__init__(self, row[:-5], db, schema)
VectorTable.__init__(self, db, schema)
# GPKG does case-insensitive checks for table names, but the
# GPKG provider didn't do the same in Qgis < 1.9, so self.geomTableName
# stores the table name like stored in the geometry_columns table
self.geomTableName, self.geomColumn, self.geomType, self.geomDim, self.srid = row[-5:]
self.extent = self.database().connector.getTableExtent((self.schemaName(), self.name), self.geomColumn, force=False)
def uri(self):
uri = self.database().uri()
uri.setDataSource('', self.geomTableName, self.geomColumn)
return uri
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column is not None else self.geomColumn
return self.database().connector.hasSpatialIndex((self.schemaName(), self.name), geom_column)
def createSpatialIndex(self, geom_column=None):
self.aboutToChange.emit()
ret = VectorTable.createSpatialIndex(self, geom_column)
if ret is not False:
self.database().refresh()
return ret
def deleteSpatialIndex(self, geom_column=None):
self.aboutToChange.emit()
ret = VectorTable.deleteSpatialIndex(self, geom_column)
if ret is not False:
self.database().refresh()
return ret
def refreshTableEstimatedExtent(self):
return
def refreshTableExtent(self):
prevExtent = self.extent
self.extent = self.database().connector.getTableExtent((self.schemaName(), self.name), self.geomColumn, force=True)
if self.extent != prevExtent:
self.refresh()
def runAction(self, action):
if GPKGTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
class GPKGRasterTable(GPKGTable, RasterTable):
def __init__(self, row, db, schema=None):
GPKGTable.__init__(self, row[:-3], db, schema)
RasterTable.__init__(self, db, schema)
self.prefixName, self.geomColumn, self.srid = row[-3:]
self.geomType = 'RASTER'
self.extent = self.database().connector.getTableExtent((self.schemaName(), self.name), self.geomColumn)
def gpkgGdalUri(self):
gdalUri = u'GPKG:%s:%s' % (self.uri().database(), self.prefixName)
return gdalUri
def mimeUri(self):
# QGIS has no provider to load rasters, let's use GDAL
uri = u"raster:gdal:%s:%s" % (self.name, self.uri().database())
return uri
def toMapLayer(self):
from qgis.core import QgsRasterLayer, QgsContrastEnhancement
# QGIS has no provider to load rasters, let's use GDAL
uri = self.gpkgGdalUri()
rl = QgsRasterLayer(uri, self.name)
if rl.isValid():
rl.setContrastEnhancement(QgsContrastEnhancement.StretchToMinimumMaximum)
return rl
class GPKGTableField(TableField):
def __init__(self, row, table):
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.notNull, self.default, self.primaryKey = row
self.hasDefault = self.default
class GPKGTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.num, self.name, self.isUnique, self.columns = row
class GPKGTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.function = row
| gpl-2.0 | -2,452,361,057,841,515,500 | 32.716561 | 133 | 0.616039 | false |
rubenvb/skia | tools/skp/page_sets/skia_ynevsvg_desktop.py | 6 | 1260 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_ynevsvg_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(5)
class SkiaYnevsvgDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaYnevsvgDesktopPageSet, self).__init__(
archive_data_file='data/skia_ynevsvg_desktop.json')
urls_list = [
# Why: from skbug.com/4713
('https://storage.googleapis.com/skia-infra-testdata/images-for-skps/'
'ynev.svg'),
]
for url in urls_list:
self.AddStory(SkiaBuildbotDesktopPage(url, self))
| bsd-3-clause | 2,906,132,539,367,698,000 | 29 | 76 | 0.706349 | false |
benley/Mathics | test/test_console.py | 2 | 2071 | import os
import sys
import pexpect
import unittest
class ConsoleTest(unittest.TestCase):
def setUp(self):
os.environ["TERM"] = "dumb"
self.console = pexpect.spawn('python2 mathics/main.py --color NOCOLOR')
def testLaunch(self):
cons = self.console
self.assertRegexpMatches(cons.readline(), '.*\r\n')
self.assertRegexpMatches(
cons.readline(), 'Mathics \\d\\.\\d.*\r\n')
self.assertRegexpMatches(
cons.readline(), 'on (CPython|PyPy) \\d.\\d.\\d \\(.+\\) ?\r\n')
self.assertRegexpMatches(
cons.readline(), 'using ([a-zA-Z]+ [\\.\\d]+(, |\r\n$))+')
self.assertRegexpMatches(cons.readline(), '\r\n')
self.assertRegexpMatches(
cons.readline(),
'Copyright \\(C\\) 2011\-20\\d\\d The Mathics Team.\r\n')
self.assertEqual(
''.join(cons.readline() for i in range(7)),
'This program comes with ABSOLUTELY NO WARRANTY.\r\n'
'This is free software, and you are welcome to redistribute it\r\n'
'under certain conditions.\r\n'
'See the documentation for the full license.\r\n'
'\r\n'
'Quit by pressing CONTROL-D\r\n'
'\r\n')
def testPrompt(self):
cons = self.console
cons.expect('Quit by pressing CONTROL-D\r\n\r\n')
self.lineno = 1
def check(query, result):
inprompt = 'In[{0}]:= '.format(self.lineno)
self.assertEqual(
cons.read(len(inprompt)), inprompt)
cons.sendline(query)
self.assertEqual(
cons.readline(), '{0}\r\n'.format(query))
outprompt = 'Out[{0}]= {1}\r\n'.format(self.lineno, result)
self.assertEqual(cons.readline(), outprompt)
self.assertEqual(cons.readline(), '\r\n')
self.lineno += 1
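        # e.g. the first round-trip reads the prompt 'In[1]:= ', sends the
        # query '1 + 1', and expects 'Out[1]= 2' plus a trailing blank line.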
check('1 + 1', '2')
check('2 * 3', '6')
def tearDown(self):
self.console.close()
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 6,230,428,200,777,340,000 | 29.014493 | 79 | 0.53887 | false |
afrolov1/nova | nova/volume/encryptors/__init__.py | 16 | 2378 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.volume.encryptors import nop
LOG = logging.getLogger(__name__)
def get_volume_encryptor(connection_info, **kwargs):
"""Creates a VolumeEncryptor used to encrypt the specified volume.
:param: the connection information used to attach the volume
:returns VolumeEncryptor: the VolumeEncryptor for the volume
"""
encryptor = nop.NoOpEncryptor(connection_info, **kwargs)
location = kwargs.get('control_location', None)
if location and location.lower() == 'front-end': # case insensitive
provider = kwargs.get('provider')
try:
encryptor = importutils.import_object(provider, connection_info,
**kwargs)
except Exception as e:
LOG.error(_("Error instantiating %(provider)s: %(exception)s"),
provider=provider, exception=e)
raise
return encryptor
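# Illustrative call (the provider path is an assumption, not mandated here):
#   encryptor = get_volume_encryptor(connection_info,
#                                    control_location='front-end',
#                                    provider='nova.volume.encryptors.'
#                                             'luks.LuksEncryptor')
# Any control location other than 'front-end' keeps the NoOpEncryptor.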
def get_encryption_metadata(context, volume_api, volume_id, connection_info):
metadata = {}
if ('data' in connection_info and
connection_info['data'].get('encrypted', False)):
try:
metadata = volume_api.get_volume_encryption_metadata(context,
volume_id)
except Exception as e:
LOG.error(_("Failed to retrieve encryption metadata for "
"volume %(volume_id)s: %(exception)s"),
{'volume_id': volume_id, 'exception': e})
raise
return metadata
| apache-2.0 | 2,015,992,177,406,842,400 | 37.354839 | 78 | 0.640454 | false |
SciTools/iris | lib/iris/tests/unit/common/lenient/test__lenient_client.py | 3 | 6294 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :func:`iris.common.lenient._lenient_client`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from inspect import getmodule
from unittest.mock import sentinel
from iris.common.lenient import _LENIENT, _lenient_client
class Test(tests.IrisTest):
def setUp(self):
module_name = getmodule(self).__name__
self.client = f"{module_name}" + ".Test.{}.<locals>.myclient"
self.service = f"{module_name}" + ".Test.{}.<locals>.myservice"
self.active = "active"
self.args_in = sentinel.arg1, sentinel.arg2
self.kwargs_in = dict(kwarg1=sentinel.kwarg1, kwarg2=sentinel.kwarg2)
def test_args_too_many(self):
emsg = "Invalid lenient client arguments, expecting 1"
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(None, None)
def test_args_not_callable(self):
emsg = "Invalid lenient client argument, expecting a callable"
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(None)
def test_args_and_kwargs(self):
def func():
pass
emsg = (
"Invalid lenient client, got both arguments and keyword arguments"
)
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(func, services=func)
def test_call_naked(self):
@_lenient_client
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_naked")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_naked_alternative(self):
def myclient():
return _LENIENT.__dict__.copy()
result = _lenient_client(myclient)()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_naked_alternative")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_naked_client_args_kwargs(self):
@_lenient_client
def myclient(*args, **kwargs):
return args, kwargs
args_out, kwargs_out = myclient(*self.args_in, **self.kwargs_in)
self.assertEqual(args_out, self.args_in)
self.assertEqual(kwargs_out, self.kwargs_in)
def test_call_naked_doc(self):
@_lenient_client
def myclient():
"""myclient doc-string"""
self.assertEqual(myclient.__doc__, "myclient doc-string")
def test_call_no_kwargs(self):
@_lenient_client()
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_no_kwargs")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_no_kwargs_alternative(self):
def myclient():
return _LENIENT.__dict__.copy()
result = (_lenient_client())(myclient)()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_no_kwargs_alternative")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_kwargs_none(self):
@_lenient_client(services=None)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_none")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_kwargs_single(self):
service = sentinel.service
@_lenient_client(services=service)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_single")
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
self.assertEqual(result[qualname_client], (service,))
def test_call_kwargs_single_callable(self):
def myservice():
pass
@_lenient_client(services=myservice)
def myclient():
return _LENIENT.__dict__.copy()
test_name = "test_call_kwargs_single_callable"
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format(test_name)
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
qualname_services = (self.service.format(test_name),)
self.assertEqual(result[qualname_client], qualname_services)
def test_call_kwargs_iterable(self):
services = (sentinel.service1, sentinel.service2)
@_lenient_client(services=services)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_iterable")
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
self.assertEqual(set(result[qualname_client]), set(services))
def test_call_client_args_kwargs(self):
@_lenient_client()
def myclient(*args, **kwargs):
return args, kwargs
args_out, kwargs_out = myclient(*self.args_in, **self.kwargs_in)
self.assertEqual(args_out, self.args_in)
self.assertEqual(kwargs_out, self.kwargs_in)
def test_call_doc(self):
@_lenient_client()
def myclient():
"""myclient doc-string"""
self.assertEqual(myclient.__doc__, "myclient doc-string")
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 2,178,220,996,251,594,500 | 33.582418 | 79 | 0.635526 | false |
Frankkkkk/arctic | arctic/arctic.py | 1 | 19601 | import logging
import pymongo
from pymongo.errors import OperationFailure, AutoReconnect
import threading
from ._util import indent
from .auth import authenticate, get_auth
from .decorators import mongo_retry
from .exceptions import LibraryNotFoundException, ArcticException, QuotaExceededException
from .hooks import get_mongodb_uri
from .store import version_store, bson_store
from .tickstore import tickstore, toplevel
from .chunkstore import chunkstore
from six import string_types
__all__ = ['Arctic', 'VERSION_STORE', 'TICK_STORE', 'CHUNK_STORE', 'register_library_type']
logger = logging.getLogger(__name__)
# Default Arctic application name: 'arctic'
APPLICATION_NAME = 'arctic'
VERSION_STORE = version_store.VERSION_STORE_TYPE
TICK_STORE = tickstore.TICK_STORE_TYPE
CHUNK_STORE = chunkstore.CHUNK_STORE_TYPE
LIBRARY_TYPES = {version_store.VERSION_STORE_TYPE: version_store.VersionStore,
tickstore.TICK_STORE_TYPE: tickstore.TickStore,
toplevel.TICK_STORE_TYPE: toplevel.TopLevelTickStore,
chunkstore.CHUNK_STORE_TYPE: chunkstore.ChunkStore,
bson_store.BSON_STORE_TYPE: bson_store.BSONStore
}
def register_library_type(name, type_):
"""
Register a Arctic Library Type handler
"""
if name in LIBRARY_TYPES:
raise ArcticException("Library %s already registered as %s" % (name, LIBRARY_TYPES[name]))
LIBRARY_TYPES[name] = type_
class Arctic(object):
"""
The Arctic class is a top-level God object, owner of all arctic_<user> databases
accessible in Mongo.
Each database contains one or more ArcticLibrarys which may have implementation
specific functionality.
Current Mongo Library types:
- arctic.VERSION_STORE - Versioned store for chunked Pandas and numpy objects
(other Python types are pickled)
- arctic.TICK_STORE - Tick specific library. Supports 'snapshots', efficiently
stores updates, not versioned.
Arctic and ArcticLibrary are responsible for Connection setup, authentication,
dispatch to the appropriate library implementation, and quotas.
"""
DB_PREFIX = 'arctic'
METADATA_COLL = "ARCTIC"
METADATA_DOC_ID = "ARCTIC_META"
_MAX_CONNS = 4
__conn = None
def __init__(self, mongo_host, app_name=APPLICATION_NAME, allow_secondary=False,
socketTimeoutMS=10 * 60 * 1000, connectTimeoutMS=2 * 1000,
serverSelectionTimeoutMS=30 * 1000):
"""
        Constructs an Arctic Datastore.
Parameters:
-----------
mongo_host: A MongoDB hostname, alias or Mongo Connection
app_name: `str` is the name of application used for resolving credentials when
authenticating against the mongo_host.
We will fetch credentials using the authentication hook.
Teams should override this such that different applications don't accidentally
run with privileges to other applications' databases
allow_secondary: `bool` indicates if we allow reads against
secondary members in the cluster. These reads may be
a few seconds behind (but are usually split-second up-to-date).
serverSelectionTimeoutMS: `int` the main tunable used for configuring how long
the pymongo driver will spend on MongoDB cluster discovery. This parameter
takes precedence over connectTimeoutMS: https://jira.mongodb.org/browse/DRIVERS-222
"""
self._application_name = app_name
self._library_cache = {}
self._allow_secondary = allow_secondary
self._socket_timeout = socketTimeoutMS
self._connect_timeout = connectTimeoutMS
self._server_selection_timeout = serverSelectionTimeoutMS
self._lock = threading.RLock()
if isinstance(mongo_host, string_types):
self.mongo_host = mongo_host
else:
self.__conn = mongo_host
# Workaround for: https://jira.mongodb.org/browse/PYTHON-927
mongo_host.server_info()
self.mongo_host = ",".join(["{}:{}".format(x[0], x[1]) for x in mongo_host.nodes])
self._adminDB = self._conn.admin
@property
@mongo_retry
def _conn(self):
with self._lock:
if self.__conn is None:
host = get_mongodb_uri(self.mongo_host)
logger.info("Connecting to mongo: {0} ({1})".format(self.mongo_host, host))
self.__conn = pymongo.MongoClient(host=host,
maxPoolSize=self._MAX_CONNS,
socketTimeoutMS=self._socket_timeout,
connectTimeoutMS=self._connect_timeout,
serverSelectionTimeoutMS=self._server_selection_timeout)
self._adminDB = self.__conn.admin
# Authenticate against admin for the user
auth = get_auth(self.mongo_host, self._application_name, 'admin')
if auth:
authenticate(self._adminDB, auth.user, auth.password)
# Accessing _conn is synchronous. The new PyMongo driver may be lazier than the previous.
# Force a connection.
self.__conn.server_info()
return self.__conn
def reset(self):
with self._lock:
if self.__conn is not None:
self.__conn.close()
self.__conn = None
for _, l in self._library_cache.items():
l._reset()
def __str__(self):
return "<Arctic at %s, connected to %s>" % (hex(id(self)), str(self._conn))
def __repr__(self):
return str(self)
def __getstate__(self):
return {'mongo_host': self.mongo_host,
'app_name': self._application_name,
'allow_secondary': self._allow_secondary,
'socketTimeoutMS': self._socket_timeout,
'connectTimeoutMS': self._connect_timeout,
'serverSelectionTimeoutMS': self._server_selection_timeout}
def __setstate__(self, state):
return Arctic.__init__(self, **state)
@mongo_retry
def list_libraries(self):
"""
Returns
-------
list of Arctic library names
"""
libs = []
for db in self._conn.database_names():
if db.startswith(self.DB_PREFIX + '_'):
for coll in self._conn[db].collection_names():
if coll.endswith(self.METADATA_COLL):
libs.append(db[len(self.DB_PREFIX) + 1:] + "." + coll[:-1 * len(self.METADATA_COLL) - 1])
elif db == self.DB_PREFIX:
for coll in self._conn[db].collection_names():
if coll.endswith(self.METADATA_COLL):
libs.append(coll[:-1 * len(self.METADATA_COLL) - 1])
return libs
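    # Illustrative result (hypothetical names): a deployment holding the
    # databases 'arctic' and 'arctic_user' could return
    #   ['library', 'user.library']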
@mongo_retry
def initialize_library(self, library, lib_type=VERSION_STORE, **kwargs):
"""
Create an Arctic Library or a particular type.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
lib_type : `str`
The type of the library. e.g. arctic.VERSION_STORE or arctic.TICK_STORE
Or any type registered with register_library_type
Default: arctic.VERSION_STORE
kwargs :
Arguments passed to the Library type for initialization.
"""
l = ArcticLibraryBinding(self, library)
# Check that we don't create too many namespaces
if len(self._conn[l.database_name].collection_names()) > 3000:
raise ArcticException("Too many namespaces %s, not creating: %s" %
(len(self._conn[l.database_name].collection_names()), library))
l.set_library_type(lib_type)
LIBRARY_TYPES[lib_type].initialize_library(l, **kwargs)
# Add a 10G quota just in case the user is calling this with API.
if not l.get_quota():
l.set_quota(10 * 1024 * 1024 * 1024)
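    # Illustrative usage (hypothetical library name):
    #   store.initialize_library('user.library', lib_type=VERSION_STORE)
    # creates the backing collections in 'arctic_user' with a 10G quota.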
@mongo_retry
def delete_library(self, library):
"""
Delete an Arctic Library, and all associated collections in the MongoDB.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
"""
l = ArcticLibraryBinding(self, library)
colname = l.get_top_level_collection().name
logger.info('Dropping collection: %s' % colname)
l._db.drop_collection(colname)
for coll in l._db.collection_names():
if coll.startswith(colname + '.'):
logger.info('Dropping collection: %s' % coll)
l._db.drop_collection(coll)
if library in self._library_cache:
del self._library_cache[library]
del self._library_cache[l.get_name()]
def get_library(self, library):
"""
Return the library instance. Can generally use slicing to return the library:
arctic_store[library]
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
"""
if library in self._library_cache:
return self._library_cache[library]
try:
error = None
l = ArcticLibraryBinding(self, library)
lib_type = l.get_library_type()
except (OperationFailure, AutoReconnect) as e:
error = e
if error:
raise LibraryNotFoundException("Library %s was not correctly initialized in %s.\nReason: %r)" %
(library, self, error))
elif not lib_type:
raise LibraryNotFoundException("Library %s was not correctly initialized in %s." %
(library, self))
elif lib_type not in LIBRARY_TYPES:
raise LibraryNotFoundException("Couldn't load LibraryType '%s' for '%s' (has the class been registered?)" %
(lib_type, library))
instance = LIBRARY_TYPES[lib_type](l)
self._library_cache[library] = instance
# The library official name may be different from 'library': e.g. 'library' vs 'user.library'
self._library_cache[l.get_name()] = instance
return self._library_cache[library]
def __getitem__(self, key):
if isinstance(key, string_types):
return self.get_library(key)
else:
raise ArcticException("Unrecognised library specification - use [libraryName]")
def set_quota(self, library, quota):
"""
Set a quota (in bytes) on this user library. The quota is 'best effort',
and should be set conservatively.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
quota : `int`
Advisory quota for the library - in bytes
"""
l = ArcticLibraryBinding(self, library)
l.set_quota(quota)
def get_quota(self, library):
"""
Return the quota currently set on the library.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
"""
l = ArcticLibraryBinding(self, library)
return l.get_quota()
def check_quota(self, library):
"""
Check the quota on the library, as would be done during normal writes.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
Raises
------
arctic.exceptions.QuotaExceededException if the quota has been exceeded
"""
l = ArcticLibraryBinding(self, library)
l.check_quota()
def rename_library(self, from_lib, to_lib):
"""
Renames a library
Parameters
----------
from_lib: str
The name of the library to be renamed
to_lib: str
The new name of the library
"""
l = ArcticLibraryBinding(self, from_lib)
colname = l.get_top_level_collection().name
logger.info('Dropping collection: %s' % colname)
l._db[colname].rename(to_lib)
for coll in l._db.collection_names():
if coll.startswith(colname + '.'):
l._db[coll].rename(coll.replace(from_lib, to_lib))
if from_lib in self._library_cache:
del self._library_cache[from_lib]
del self._library_cache[l.get_name()]
def get_library_type(self, lib):
"""
Returns the type of the library
Parameters
----------
lib: str
the library
"""
l = ArcticLibraryBinding(self, lib)
return l.get_library_type()
class ArcticLibraryBinding(object):
"""
The ArcticLibraryBinding type holds the binding between the library name and the
concrete implementation of the library.
Also provides access to additional metadata about the library
- Access to the library's top-level collection
- Enforces quota on the library
- Access to custom metadata about the library
"""
DB_PREFIX = Arctic.DB_PREFIX
TYPE_FIELD = "TYPE"
QUOTA = 'QUOTA'
quota = None
quota_countdown = 0
@classmethod
def _parse_db_lib(clz, library):
"""
Returns the canonical (database_name, library) for the passed in
string 'library'.
"""
database_name = library.split('.', 2)
if len(database_name) == 2:
library = database_name[1]
if database_name[0].startswith(clz.DB_PREFIX):
database_name = database_name[0]
else:
database_name = clz.DB_PREFIX + '_' + database_name[0]
else:
database_name = clz.DB_PREFIX
return database_name, library
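    # Illustrative mapping (hypothetical names):
    #   _parse_db_lib('library')      -> ('arctic', 'library')
    #   _parse_db_lib('user.library') -> ('arctic_user', 'library')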
def __init__(self, arctic, library):
self.arctic = arctic
database_name, library = self._parse_db_lib(library)
self.library = library
self.database_name = database_name
self.get_top_level_collection() # Eagerly trigger auth
@property
def _db(self):
db = self.arctic._conn[self.database_name]
self._auth(db)
return db
@property
def _library_coll(self):
return self._db[self.library]
def __str__(self):
return """<ArcticLibrary at %s, %s.%s>
%s""" % (hex(id(self)), self._db.name, self._library_coll.name, indent(str(self.arctic), 4))
def __repr__(self):
return str(self)
def __getstate__(self):
return {'arctic': self.arctic, 'library': '.'.join([self.database_name, self.library])}
def __setstate__(self, state):
return ArcticLibraryBinding.__init__(self, state['arctic'], state['library'])
@mongo_retry
def _auth(self, database):
#Get .mongopass details here
if not hasattr(self.arctic, 'mongo_host'):
return
auth = get_auth(self.arctic.mongo_host, self.arctic._application_name, database.name)
if auth:
authenticate(database, auth.user, auth.password)
def get_name(self):
return self._db.name + '.' + self._library_coll.name
def get_top_level_collection(self):
"""
Return the top-level collection for the Library. This collection is to be used
for storing data.
Note we expect (and callers require) this collection to have default read-preference: primary
The read path may choose to reduce this if secondary reads are allowed.
"""
return self._library_coll
def set_quota(self, quota_bytes):
"""
Set a quota (in bytes) on this user library. The quota is 'best effort',
and should be set conservatively.
A quota of 0 is 'unlimited'
"""
self.set_library_metadata(ArcticLibraryBinding.QUOTA, quota_bytes)
self.quota = quota_bytes
self.quota_countdown = 0
def get_quota(self):
"""
Get the current quota on this user library.
"""
return self.get_library_metadata(ArcticLibraryBinding.QUOTA)
def check_quota(self):
"""
Check whether the user is within quota. Should be called before
every write. Will raise() if the library has exceeded its allotted
quota.
"""
# Don't check on every write, that would be slow
if self.quota_countdown > 0:
self.quota_countdown -= 1
return
# Re-cache the quota after the countdown
self.quota = self.get_library_metadata(ArcticLibraryBinding.QUOTA)
if self.quota is None or self.quota == 0:
self.quota = 0
return
# Figure out whether the user has exceeded their quota
library = self.arctic[self.get_name()]
stats = library.stats()
def to_gigabytes(bytes_):
return bytes_ / 1024. / 1024. / 1024.
# Have we exceeded our quota?
size = stats['totals']['size']
count = stats['totals']['count']
if size >= self.quota:
raise QuotaExceededException("Mongo Quota Exceeded: %s %.3f / %.0f GB used" % (
'.'.join([self.database_name, self.library]),
to_gigabytes(size),
to_gigabytes(self.quota)))
# Quota not exceeded, print an informational message and return
avg_size = size // count if count > 1 else 100 * 1024
remaining = self.quota - size
remaining_count = remaining / avg_size
if remaining_count < 100 or float(remaining) / self.quota < 0.1:
logger.warning("Mongo Quota: %s %.3f / %.0f GB used" % (
'.'.join([self.database_name, self.library]),
to_gigabytes(size),
to_gigabytes(self.quota)))
else:
logger.info("Mongo Quota: %s %.3f / %.0f GB used" % (
'.'.join([self.database_name, self.library]),
to_gigabytes(size),
to_gigabytes(self.quota)))
# Set-up a timer to prevent us for checking for a few writes.
# This will check every average half-life
self.quota_countdown = int(max(remaining_count // 2, 1))
def get_library_type(self):
return self.get_library_metadata(ArcticLibraryBinding.TYPE_FIELD)
def set_library_type(self, lib_type):
self.set_library_metadata(ArcticLibraryBinding.TYPE_FIELD, lib_type)
@mongo_retry
def get_library_metadata(self, field):
lib_metadata = self._library_coll[self.arctic.METADATA_COLL].find_one({"_id": self.arctic.METADATA_DOC_ID})
if lib_metadata is not None:
return lib_metadata.get(field)
else:
return None
@mongo_retry
def set_library_metadata(self, field, value):
self._library_coll[self.arctic.METADATA_COLL].update_one({'_id': self.arctic.METADATA_DOC_ID},
{'$set': {field: value}}, upsert=True)
| lgpl-2.1 | 300,281,165,316,110,100 | 36.549808 | 119 | 0.579664 | false |
rlr/fjord | vendor/packages/translate-toolkit/translate/filters/checks.py | 3 | 68988 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2011 Zuza Software Foundation
# 2013 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This is a set of validation checks that can be performed on translation
units.
Derivatives of UnitChecker (like StandardUnitChecker) check translation units,
and derivatives of TranslationChecker (like StandardChecker) check
(source, target) translation pairs.
When adding a new test here, please document and explain their behaviour on the
:doc:`pofilter tests </commands/pofilter_tests>` page.
"""
import re
import logging
from translate.filters import decoration
from translate.filters import helpers
from translate.filters import prefilters
from translate.filters import spelling
from translate.filters.decorators import (critical, functional, cosmetic,
extraction)
from translate.lang import factory
from translate.lang import data
from translate.misc import lru
logger = logging.getLogger(__name__)
# These are some regular expressions that are compiled for use in some tests
# printf syntax based on http://en.wikipedia.org/wiki/Printf which doesn't
# cover everything we leave \w instead of specifying the exact letters as
# this should capture printf types defined in other platforms.
# Extended to support Python named format specifiers and objective-C special
# "%@" format specifier
# (see https://developer.apple.com/library/mac/documentation/Cocoa/Conceptual/Strings/Articles/formatSpecifiers.html)
printf_pat = re.compile('''
%( # initial %
(?:(?P<ord>\d+)\$| # variable order, like %1$s
\((?P<key>\w+)\))? # Python style variables, like %(var)s
(?P<fullvar>
[+#-]* # flags
(?:\d+)? # width
(?:\.\d+)? # precision
        (hh|h|l|ll)? # length formatting
(?P<type>[\w%@])) # type (%s, %d, etc.)
)''', re.VERBOSE)
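# Illustrative matches (not part of the original module):
#   printf_pat.search('%(count)d files').group('key', 'type') -> ('count', 'd')
#   printf_pat.search('%1$s done').group('ord', 'type')       -> ('1', 's')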
# The name of the XML tag
tagname_re = re.compile("<[\s]*([\w\/]*).*?(/)?[\s]*>", re.DOTALL)
# We allow escaped quotes, probably for old escaping style of OOo helpcontent
#TODO: remove escaped strings once usage is audited
property_re = re.compile(" (\w*)=((\\\\?\".*?\\\\?\")|(\\\\?'.*?\\\\?'))")
# The whole tag
tag_re = re.compile("<[^>]+>")
gconf_attribute_re = re.compile('"[a-z_]+?"')
def tagname(string):
"""Returns the name of the XML/HTML tag in string"""
tagname_match = tagname_re.match(string)
return tagname_match.groups(1)[0] + tagname_match.groups('')[1]
def intuplelist(pair, list):
"""Tests to see if pair == (a,b,c) is in list, but handles None entries in
list as wildcards (only allowed in positions "a" and "c"). We take a
shortcut by only considering "c" if "b" has already matched."""
a, b, c = pair
if (b, c) == (None, None):
#This is a tagname
return pair
for pattern in list:
x, y, z = pattern
if (x, y) in [(a, b), (None, b)]:
if z in [None, c]:
return pattern
return pair
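# Illustrative call (hypothetical tag data): a None entry acts as a wildcard,
# so intuplelist(('img', 'src', 'x.png'), [(None, 'src', None)]) returns
# (None, 'src', None), signalling that the pair matched an ignore pattern.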
def tagproperties(strings, ignore):
"""Returns all the properties in the XML/HTML tag string as
(tagname, propertyname, propertyvalue), but ignore those combinations
specified in ignore."""
properties = []
for string in strings:
tag = tagname(string)
properties += [(tag, None, None)]
#Now we isolate the attribute pairs.
pairs = property_re.findall(string)
for property, value, a, b in pairs:
#Strip the quotes:
value = value[1:-1]
            if (tag, property, value) in ignore or \
               intuplelist((tag, property, value), ignore) != (tag, property, value):
                # This combination is ignorable; skip it but keep checking the
                # remaining properties of the same tag.
                continue
            properties += [(tag, property, value)]
return properties
class FilterFailure(Exception):
"""This exception signals that a Filter didn't pass, and gives an
explanation or a comment.
"""
def __init__(self, messages):
if not isinstance(messages, list):
messages = [messages]
assert isinstance(messages[0], unicode) # Assumption: all of same type
self.messages = messages
def __unicode__(self):
return unicode(u", ".join(self.messages))
def __str__(self):
return str(u", ".join(self.messages))
class SeriousFilterFailure(FilterFailure):
"""This exception signals that a Filter didn't pass, and the bad translation
might break an application (so the string will be marked fuzzy)"""
pass
#(tag, attribute, value) specifies a certain attribute which can be changed/
#ignored if it exists inside tag. In the case where there is a third element
#in the tuple, it indicates a property value that can be ignored if present
#(like defaults, for example)
#If a certain item is None, it indicates that it is relevant for all values of
#the property/tag that is specified as None. A non-None value of "value"
#indicates that the value of the attribute must be taken into account.
common_ignoretags = [(None, "xml-lang", None)]
common_canchangetags = [("img", "alt", None),
(None, "title", None),
(None, "dir", None),
(None, "lang", None),
]
# Actually the title tag is allowed on many tags in HTML (but probably not all)
class CheckerConfig(object):
"""Object representing the configuration of a checker."""
def __init__(self, targetlanguage=None, accelmarkers=None, varmatches=None,
notranslatewords=None, musttranslatewords=None,
validchars=None, punctuation=None, endpunctuation=None,
ignoretags=None, canchangetags=None, criticaltests=None,
credit_sources=None):
# Init lists
self.accelmarkers = self._init_list(accelmarkers)
self.varmatches = self._init_list(varmatches)
self.criticaltests = self._init_list(criticaltests)
self.credit_sources = self._init_list(credit_sources)
# Lang data
self.updatetargetlanguage(targetlanguage)
self.sourcelang = factory.getlanguage('en')
# Inits with default values
self.punctuation = self._init_default(data.normalized_unicode(punctuation),
self.lang.punctuation)
self.endpunctuation = self._init_default(data.normalized_unicode(endpunctuation),
self.lang.sentenceend)
self.ignoretags = self._init_default(ignoretags, common_ignoretags)
self.canchangetags = self._init_default(canchangetags, common_canchangetags)
# Other data
# TODO: allow user configuration of untranslatable words
self.notranslatewords = dict.fromkeys([data.normalized_unicode(key) for key in self._init_list(notranslatewords)])
self.musttranslatewords = dict.fromkeys([data.normalized_unicode(key) for key in self._init_list(musttranslatewords)])
validchars = data.normalized_unicode(validchars)
self.validcharsmap = {}
self.updatevalidchars(validchars)
def _init_list(self, list):
"""initialise configuration paramaters that are lists
:type list: List
:param list: None (we'll initialise a blank list) or a list paramater
:rtype: List
"""
if list is None:
list = []
return list
def _init_default(self, param, default):
"""Initialise parameters that can have default options.
        :param param: the user supplied parameter value
        :param default: default values when param is not specified
        :return: the parameter as specified by the user or the default settings
"""
if param is None:
return default
return param
def update(self, otherconfig):
"""Combines the info in ``otherconfig`` into this config object."""
self.targetlanguage = otherconfig.targetlanguage or self.targetlanguage
self.updatetargetlanguage(self.targetlanguage)
self.accelmarkers.extend([c for c in otherconfig.accelmarkers if c not in self.accelmarkers])
self.varmatches.extend(otherconfig.varmatches)
self.notranslatewords.update(otherconfig.notranslatewords)
self.musttranslatewords.update(otherconfig.musttranslatewords)
self.validcharsmap.update(otherconfig.validcharsmap)
self.punctuation += otherconfig.punctuation
self.endpunctuation += otherconfig.endpunctuation
#TODO: consider also updating in the following cases:
self.ignoretags = otherconfig.ignoretags
self.canchangetags = otherconfig.canchangetags
self.criticaltests.extend(otherconfig.criticaltests)
self.credit_sources = otherconfig.credit_sources
def updatevalidchars(self, validchars):
"""Updates the map that eliminates valid characters."""
if validchars is None:
return True
validcharsmap = dict([(ord(validchar), None) for validchar in data.normalized_unicode(validchars)])
self.validcharsmap.update(validcharsmap)
def updatetargetlanguage(self, langcode):
"""Updates the target language in the config to the given target
language.
"""
self.targetlanguage = langcode
self.lang = factory.getlanguage(langcode)
def cache_results(f):
def cached_f(self, param1):
key = (f.__name__, param1)
res_cache = self.results_cache
if key in res_cache:
return res_cache[key]
else:
value = f(self, param1)
res_cache[key] = value
return value
return cached_f
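# Minimal usage sketch (illustrative, not part of the upstream code):
# cache_results memoises single-argument filter methods per
# (function name, argument) in self.results_cache, which run_filters()
# resets before and after every unit.
def _example_cache_results():
    checker = StandardChecker()
    checker.results_cache = {}
    first = checker.filtervariables(u"Open %s")   # computed, then cached
    second = checker.filtervariables(u"Open %s")  # served from results_cache
    assert first == second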
class UnitChecker(object):
"""Parent Checker class which does the checking based on functions available
in derived classes.
"""
preconditions = {}
#: Categories where each checking function falls into
#: Function names are used as keys, categories are the values
categories = {}
def __init__(self, checkerconfig=None, excludefilters=None,
limitfilters=None, errorhandler=None):
self.errorhandler = errorhandler
if checkerconfig is None:
self.setconfig(CheckerConfig())
else:
self.setconfig(checkerconfig)
# Exclude functions defined in UnitChecker from being treated as tests.
self.helperfunctions = {}
for functionname in dir(UnitChecker):
function = getattr(self, functionname)
if callable(function):
self.helperfunctions[functionname] = function
self.defaultfilters = self.getfilters(excludefilters, limitfilters)
self.results_cache = {}
def getfilters(self, excludefilters=None, limitfilters=None):
"""Returns dictionary of available filters, including/excluding those
in the given lists.
"""
filters = {}
if limitfilters is None:
# use everything available unless instructed
limitfilters = dir(self)
if excludefilters is None:
excludefilters = {}
for functionname in limitfilters:
if functionname in excludefilters:
continue
if functionname in self.helperfunctions:
continue
if functionname == "errorhandler":
continue
filterfunction = getattr(self, functionname, None)
if not callable(filterfunction):
continue
filters[functionname] = filterfunction
return filters
def setconfig(self, config):
"""Sets the accelerator list."""
self.config = config
self.accfilters = [prefilters.filteraccelerators(accelmarker) for accelmarker in self.config.accelmarkers]
self.varfilters = [prefilters.filtervariables(startmatch, endmatch, prefilters.varname)
for startmatch, endmatch in self.config.varmatches]
self.removevarfilter = [prefilters.filtervariables(startmatch, endmatch,
prefilters.varnone)
for startmatch, endmatch in self.config.varmatches]
def setsuggestionstore(self, store):
"""Sets the filename that a checker should use for evaluating
suggestions.
"""
self.suggestion_store = store
if self.suggestion_store:
self.suggestion_store.require_index()
def filtervariables(self, str1):
"""Filter out variables from ``str1``."""
return helpers.multifilter(str1, self.varfilters)
filtervariables = cache_results(filtervariables)
def removevariables(self, str1):
"""Remove variables from ``str1``."""
return helpers.multifilter(str1, self.removevarfilter)
removevariables = cache_results(removevariables)
def filteraccelerators(self, str1):
"""Filter out accelerators from ``str1``."""
return helpers.multifilter(str1, self.accfilters, None)
filteraccelerators = cache_results(filteraccelerators)
def filteraccelerators_by_list(self, str1, acceptlist=None):
"""Filter out accelerators from ``str1``."""
return helpers.multifilter(str1, self.accfilters, acceptlist)
def filterwordswithpunctuation(self, str1):
"""Replaces words with punctuation with their unpunctuated
equivalents.
"""
return prefilters.filterwordswithpunctuation(str1)
filterwordswithpunctuation = cache_results(filterwordswithpunctuation)
def filterxml(self, str1):
"""Filter out XML from the string so only text remains."""
return tag_re.sub("", str1)
filterxml = cache_results(filterxml)
def run_test(self, test, unit):
"""Runs the given test on the given unit.
Note that this can raise a :exc:`FilterFailure` as part of normal operation.
"""
return test(unit)
def run_filters(self, unit, categorised=False):
"""Run all the tests in this suite.
:rtype: Dictionary
:return: Content of the dictionary is as follows::
{'testname': { 'message': message_or_exception, 'category': failure_category } }
"""
self.results_cache = {}
failures = {}
ignores = self.config.lang.ignoretests[:]
functionnames = self.defaultfilters.keys()
priorityfunctionnames = self.preconditions.keys()
otherfunctionnames = filter(lambda functionname: functionname not in self.preconditions, functionnames)
for functionname in priorityfunctionnames + otherfunctionnames:
if functionname in ignores:
continue
filterfunction = getattr(self, functionname, None)
# This filterfunction may only be defined on another checker if
# using TeeChecker
if filterfunction is None:
continue
filtermessage = filterfunction.__doc__
try:
filterresult = self.run_test(filterfunction, unit)
except FilterFailure, e:
filterresult = False
filtermessage = unicode(e)
except Exception, e:
if self.errorhandler is None:
raise ValueError("error in filter %s: %r, %r, %s" % \
(functionname, unit.source, unit.target, e))
else:
filterresult = self.errorhandler(functionname, unit.source,
unit.target, e)
if not filterresult:
# We test some preconditions that aren't actually a cause for
# failure
if functionname in self.defaultfilters:
failures[functionname] = {
'message': filtermessage,
'category': self.categories[functionname],
}
if functionname in self.preconditions:
for ignoredfunctionname in self.preconditions[functionname]:
ignores.append(ignoredfunctionname)
self.results_cache = {}
if not categorised:
for name, info in failures.iteritems():
failures[name] = info['message']
return failures
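# Illustrative note (not part of the upstream code): with categorised=True a
# failing "endpunc" check is returned as
#   {"endpunc": {"message": u"Different punctuation at the end",
#                "category": <its category>}}
# while the default categorised=False flattens this to
#   {"endpunc": u"Different punctuation at the end"}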
class TranslationChecker(UnitChecker):
"""A checker that passes source and target strings to the checks, not the
whole unit.
This provides some speedup and simplifies testing.
"""
def __init__(self, checkerconfig=None, excludefilters=None,
limitfilters=None, errorhandler=None):
super(TranslationChecker, self).__init__(checkerconfig, excludefilters,
limitfilters, errorhandler)
# caches for spell checking results across units/runs
self.source_spell_cache = lru.LRUCachingDict(256, cullsize=5, aggressive_gc=False)
self.target_spell_cache = lru.LRUCachingDict(512, cullsize=5, aggressive_gc=False)
def run_test(self, test, unit):
"""Runs the given test on the given unit.
Note that this can raise a :exc:`FilterFailure` as part of normal
operation.
"""
if self.hasplural:
filtermessages = []
filterresult = True
for pluralform in unit.target.strings:
try:
if not test(self.str1, unicode(pluralform)):
filterresult = False
except FilterFailure, e:
filterresult = False
filtermessages.extend(e.messages)
if not filterresult and filtermessages:
raise FilterFailure(filtermessages)
else:
return filterresult
else:
return test(self.str1, self.str2)
def run_filters(self, unit, categorised=False):
"""Do some optimisation by caching some data of the unit for the
benefit of :meth:`~TranslationChecker.run_test`.
"""
self.str1 = data.normalized_unicode(unit.source) or u""
self.str2 = data.normalized_unicode(unit.target) or u""
self.hasplural = unit.hasplural()
self.locations = unit.getlocations()
return super(TranslationChecker, self).run_filters(unit, categorised)
class TeeChecker:
"""A Checker that controls multiple checkers."""
#: Categories where each checking function falls into
#: Function names are used as keys, categories are the values
categories = {}
def __init__(self, checkerconfig=None, excludefilters=None,
limitfilters=None, checkerclasses=None, errorhandler=None,
languagecode=None):
"""construct a TeeChecker from the given checkers"""
self.limitfilters = limitfilters
if checkerclasses is None:
checkerclasses = [StandardChecker]
self.checkers = [checkerclass(checkerconfig=checkerconfig,
excludefilters=excludefilters,
limitfilters=limitfilters,
errorhandler=errorhandler) for checkerclass in checkerclasses]
if languagecode:
for checker in self.checkers:
checker.config.updatetargetlanguage(languagecode)
# Let's hook up the language specific checker
lang_checker = self.checkers[0].config.lang.checker
if lang_checker:
self.checkers.append(lang_checker)
self.combinedfilters = self.getfilters(excludefilters, limitfilters)
self.config = checkerconfig or self.checkers[0].config
def getfilters(self, excludefilters=None, limitfilters=None):
"""Returns a dictionary of available filters, including/excluding
those in the given lists.
"""
if excludefilters is None:
excludefilters = {}
filterslist = [checker.getfilters(excludefilters, limitfilters) for checker in self.checkers]
self.combinedfilters = {}
for filters in filterslist:
self.combinedfilters.update(filters)
# TODO: move this somewhere more sensible (a checkfilters method?)
if limitfilters is not None:
for filtername in limitfilters:
if filtername not in self.combinedfilters:
logger.warning("could not find filter %s", filtername)
return self.combinedfilters
def run_filters(self, unit, categorised=False):
"""Run all the tests in the checker's suites."""
failures = {}
for checker in self.checkers:
failures.update(checker.run_filters(unit, categorised))
return failures
def setsuggestionstore(self, store):
"""Sets the filename that a checker should use for evaluating
suggestions.
"""
for checker in self.checkers:
checker.setsuggestionstore(store)
class StandardChecker(TranslationChecker):
"""The basic test suite for source -> target translations."""
@extraction
def untranslated(self, str1, str2):
"""Checks whether a string has been translated at all."""
str2 = prefilters.removekdecomments(str2)
return not (len(str1.strip()) > 0 and len(str2) == 0)
@functional
def unchanged(self, str1, str2):
"""Checks whether a translation is basically identical to the original
string.
"""
str1 = self.filteraccelerators(self.removevariables(str1)).strip()
str2 = self.filteraccelerators(self.removevariables(str2)).strip()
if len(str1) < 2:
return True
# If the whole string is uppercase, or nothing in the string can go
# towards uppercase, let's assume there is nothing translatable
# TODO: reconsider
if (str1.isupper() or str1.upper() == str1) and str1 == str2:
return True
if self.config.notranslatewords:
words1 = str1.split()
if len(words1) == 1 and [word for word in words1 if word in self.config.notranslatewords]:
#currently equivalent to:
# if len(words1) == 1 and words1[0] in self.config.notranslatewords:
#why do we only test for one notranslate word?
return True
# we could also check for things like str1.isnumeric(), but the test
# above (str1.upper() == str1) makes this unnecessary
if str1.lower() == str2.lower():
raise FilterFailure(u"Consider translating")
return True
@functional
def blank(self, str1, str2):
"""Checks whether a translation only contains spaces."""
len1 = len(str1.strip())
len2 = len(str2.strip())
if len1 > 0 and len(str2) != 0 and len2 == 0:
raise FilterFailure(u"Translation is empty")
else:
return True
@functional
def short(self, str1, str2):
"""Checks whether a translation is much shorter than the original
string.
"""
len1 = len(str1.strip())
len2 = len(str2.strip())
if (len1 > 0) and (0 < len2 < (len1 * 0.1)) or ((len1 > 1) and (len2 == 1)):
raise FilterFailure(u"The translation is much shorter than the original")
else:
return True
@functional
def long(self, str1, str2):
"""Checks whether a translation is much longer than the original
string.
"""
len1 = len(str1.strip())
len2 = len(str2.strip())
if (len1 > 0) and (0 < len1 < (len2 * 0.1)) or ((len1 == 1) and (len2 > 1)):
raise FilterFailure(u"The translation is much longer than the original")
else:
return True
@critical
def escapes(self, str1, str2):
"""Checks whether escaping is consistent between the two strings."""
if not helpers.countsmatch(str1, str2, (u"\\", u"\\\\")):
escapes1 = u", ".join([u"'%s'" % word for word in str1.split() if u"\\" in word])
escapes2 = u", ".join([u"'%s'" % word for word in str2.split() if u"\\" in word])
raise SeriousFilterFailure(u"Escapes in original (%s) don't match "
"escapes in translation (%s)" %
(escapes1, escapes2))
else:
return True
@critical
def newlines(self, str1, str2):
"""Checks whether newlines are consistent between the two strings."""
if not helpers.countsmatch(str1, str2, (u"\n", u"\r")):
raise FilterFailure(u"Different line endings")
if str1.endswith(u"\n") and not str2.endswith(u"\n"):
raise FilterFailure(u"Newlines different at end")
if str1.startswith(u"\n") and not str2.startswith(u"\n"):
raise FilterFailure(u"Newlines different at beginning")
return True
@critical
def tabs(self, str1, str2):
"""Checks whether tabs are consistent between the two strings."""
if not helpers.countmatch(str1, str2, "\t"):
raise SeriousFilterFailure(u"Different tabs")
else:
return True
@cosmetic
def singlequoting(self, str1, str2):
"""Checks whether singlequoting is consistent between the two strings."""
str1 = self.filterwordswithpunctuation(self.filteraccelerators(self.filtervariables(str1)))
str1 = self.config.lang.punctranslate(str1)
str2 = self.filterwordswithpunctuation(self.filteraccelerators(self.filtervariables(str2)))
if helpers.countsmatch(str1, str2, (u"'", u"''", u"\\'")):
return True
else:
raise FilterFailure(u"Different quotation marks")
@cosmetic
def doublequoting(self, str1, str2):
"""Checks whether doublequoting is consistent between the
two strings.
"""
str1 = self.filteraccelerators(self.filtervariables(str1))
str1 = self.filterxml(str1)
str1 = self.config.lang.punctranslate(str1)
str2 = self.filteraccelerators(self.filtervariables(str2))
str2 = self.filterxml(str2)
if helpers.countsmatch(str1, str2, (u'"', u'""', u'\\"', u"«",
u"»", u"“", u"”")):
return True
else:
raise FilterFailure(u"Different quotation marks")
@cosmetic
def doublespacing(self, str1, str2):
"""Checks for bad double-spaces by comparing to original."""
str1 = self.filteraccelerators(str1)
str2 = self.filteraccelerators(str2)
if helpers.countmatch(str1, str2, u" "):
return True
else:
raise FilterFailure(u"Different use of double spaces")
@cosmetic
def puncspacing(self, str1, str2):
"""Checks for bad spacing after punctuation."""
# Convert all nbsp to space, and just check spaces. Useful intermediate
# step to stricter nbsp checking?
str1 = self.filteraccelerators(self.filtervariables(str1))
str1 = self.config.lang.punctranslate(str1)
str1 = str1.replace(u"\u00a0", u" ")
if str1.find(u" ") == -1:
return True
str2 = self.filteraccelerators(self.filtervariables(str2))
str2 = str2.replace(u"\u00a0", u" ")
for puncchar in self.config.punctuation:
plaincount1 = str1.count(puncchar)
if not plaincount1:
continue
plaincount2 = str2.count(puncchar)
if plaincount1 != plaincount2:
continue
spacecount1 = str1.count(puncchar + u" ")
spacecount2 = str2.count(puncchar + u" ")
if spacecount1 != spacecount2:
# Handle extra spaces that are because of transposed punctuation
if abs(spacecount1 - spacecount2) == 1 and str1.endswith(puncchar) != str2.endswith(puncchar):
continue
raise FilterFailure(u"Different spacing around punctuation")
return True
@critical
def printf(self, str1, str2):
"""Checks whether printf format strings match."""
count1 = count2 = plural = None
# self.hasplural only set by run_filters, not always available
if 'hasplural' in self.__dict__:
plural = self.hasplural
for var_num2, match2 in enumerate(printf_pat.finditer(str2)):
count2 = var_num2 + 1
str2ord = match2.group('ord')
str2key = match2.group('key')
if str2ord:
str1ord = None
for var_num1, match1 in enumerate(printf_pat.finditer(str1)):
count1 = var_num1 + 1
if match1.group('ord'):
if str2ord == match1.group('ord'):
str1ord = str2ord
if match2.group('fullvar') != match1.group('fullvar'):
raise FilterFailure(u"Different printf variable: %s" % match2.group())
elif int(str2ord) == var_num1 + 1:
str1ord = str2ord
if match2.group('fullvar') != match1.group('fullvar'):
raise FilterFailure(u"Different printf variable: %s" % match2.group())
if str1ord is None:
raise FilterFailure(u"Added printf variable: %s" % match2.group())
elif str2key:
str1key = None
for var_num1, match1 in enumerate(printf_pat.finditer(str1)):
count1 = var_num1 + 1
if match1.group('key') and str2key == match1.group('key'):
str1key = match1.group('key')
# '%.0s' "placeholder" in plural will match anything
if plural and match2.group('fullvar') == '.0s':
continue
if match1.group('fullvar') != match2.group('fullvar'):
raise FilterFailure(u"Different printf variable: %s" % match2.group())
if str1key is None:
raise FilterFailure(u"Added printf variable: %s" % match2.group())
else:
for var_num1, match1 in enumerate(printf_pat.finditer(str1)):
count1 = var_num1 + 1
# '%.0s' "placeholder" in plural will match anything
if plural and match2.group('fullvar') == '.0s':
continue
if (var_num1 == var_num2) and (match1.group('fullvar') != match2.group('fullvar')):
raise FilterFailure(u"Different printf variable: %s" % match2.group())
if count2 is None:
str1_variables = list(m.group() for m in printf_pat.finditer(str1))
if str1_variables:
raise FilterFailure(u"Missing printf variable: %s" % u", ".join(str1_variables))
if (count1 or count2) and (count1 != count2):
raise FilterFailure(u"Different number of printf variables")
return True
@functional
def accelerators(self, str1, str2):
"""Checks whether accelerators are consistent between the
two strings.
"""
str1 = self.filtervariables(str1)
str2 = self.filtervariables(str2)
messages = []
for accelmarker in self.config.accelmarkers:
counter1 = decoration.countaccelerators(accelmarker, self.config.sourcelang.validaccel)
counter2 = decoration.countaccelerators(accelmarker, self.config.lang.validaccel)
count1, countbad1 = counter1(str1)
count2, countbad2 = counter2(str2)
getaccel = decoration.getaccelerators(accelmarker, self.config.lang.validaccel)
accel2, bad2 = getaccel(str2)
if count1 == count2:
continue
if count1 == 1 and count2 == 0:
if countbad2 == 1:
messages.append(u"Accelerator '%s' appears before an invalid "
"accelerator character '%s'" %
(accelmarker, bad2[0]))
else:
messages.append(u"Missing accelerator '%s'" %
accelmarker)
elif count1 == 0:
messages.append(u"Added accelerator '%s'" % accelmarker)
elif count1 == 1 and count2 > count1:
messages.append(u"Accelerator '%s' is repeated in translation" %
accelmarker)
else:
messages.append(u"Accelerator '%s' occurs %d time(s) in original "
"and %d time(s) in translation" %
(accelmarker, count1, count2))
if messages:
if "accelerators" in self.config.criticaltests:
raise SeriousFilterFailure(messages)
else:
raise FilterFailure(messages)
return True
# def acceleratedvariables(self, str1, str2):
# """checks that no variables are accelerated"""
# messages = []
# for accelerator in self.config.accelmarkers:
# for variablestart, variableend in self.config.varmatches:
# error = accelerator + variablestart
# if str1.find(error) >= 0:
# messages.append(u"original has an accelerated variable")
# if str2.find(error) >= 0:
# messages.append(u"translation has an accelerated variable")
# if messages:
# raise FilterFailure(messages)
# return True
@critical
def variables(self, str1, str2):
"""Checks whether variables of various forms are consistent between the
two strings.
"""
messages = []
mismatch1, mismatch2 = [], []
varnames1, varnames2 = [], []
for startmarker, endmarker in self.config.varmatches:
varchecker = decoration.getvariables(startmarker, endmarker)
if startmarker and endmarker:
if isinstance(endmarker, int):
redecorate = lambda var: startmarker + var
else:
redecorate = lambda var: startmarker + var + endmarker
elif startmarker:
redecorate = lambda var: startmarker + var
else:
redecorate = lambda var: var
vars1 = varchecker(str1)
vars2 = varchecker(str2)
if vars1 != vars2:
# we use counts to compare so we can handle multiple variables
vars1, vars2 = [var for var in vars1 if vars1.count(var) > vars2.count(var)], \
[var for var in vars2 if vars1.count(var) < vars2.count(var)]
# filter variable names we've already seen, so they aren't
# matched by more than one filter...
vars1, vars2 = [var for var in vars1 if var not in varnames1], [var for var in vars2 if var not in varnames2]
varnames1.extend(vars1)
varnames2.extend(vars2)
vars1 = map(redecorate, vars1)
vars2 = map(redecorate, vars2)
mismatch1.extend(vars1)
mismatch2.extend(vars2)
if mismatch1:
messages.append(u"Do not translate: %s" % u", ".join(mismatch1))
elif mismatch2:
messages.append(u"Added variables: %s" % u", ".join(mismatch2))
if messages and mismatch1:
raise SeriousFilterFailure(messages)
elif messages:
raise FilterFailure(messages)
return True
@functional
def functions(self, str1, str2):
"""Checks that function names are not translated."""
# We can't just use helpers.funcmatch() since it doesn't ignore order
if not set(decoration.getfunctions(str1)).symmetric_difference(set(decoration.getfunctions(str2))):
return True
else:
raise FilterFailure(u"Different functions")
@functional
def emails(self, str1, str2):
"""Checks that emails are not translated."""
if helpers.funcmatch(str1, str2, decoration.getemails):
return True
else:
raise FilterFailure(u"Different e-mails")
@functional
def urls(self, str1, str2):
"""Checks that URLs are not translated."""
if helpers.funcmatch(str1, str2, decoration.geturls):
return True
else:
raise FilterFailure(u"Different URLs")
@functional
def numbers(self, str1, str2):
"""Checks whether numbers of various forms are consistent between the
two strings.
"""
if helpers.countsmatch(str1, str2, decoration.getnumbers(str1)):
return True
else:
raise FilterFailure(u"Different numbers")
@cosmetic
def startwhitespace(self, str1, str2):
"""Checks whether whitespace at the beginning of the strings
matches.
"""
if helpers.funcmatch(str1, str2, decoration.spacestart):
return True
else:
raise FilterFailure(u"Different whitespace at the start")
@cosmetic
def endwhitespace(self, str1, str2):
"""Checks whether whitespace at the end of the strings matches."""
str1 = self.config.lang.punctranslate(str1)
if helpers.funcmatch(str1, str2, decoration.spaceend):
return True
else:
raise FilterFailure(u"Different whitespace at the end")
@cosmetic
def startpunc(self, str1, str2):
"""Checks whether punctuation at the beginning of the strings match."""
str1 = self.filterxml(self.filterwordswithpunctuation(self.filteraccelerators(self.filtervariables(str1))))
str1 = self.config.lang.punctranslate(str1)
str2 = self.filterxml(self.filterwordswithpunctuation(self.filteraccelerators(self.filtervariables(str2))))
if helpers.funcmatch(str1, str2, decoration.puncstart, self.config.punctuation):
return True
else:
raise FilterFailure(u"Different punctuation at the start")
@cosmetic
def endpunc(self, str1, str2):
"""Checks whether punctuation at the end of the strings match."""
str1 = self.filtervariables(str1)
str1 = self.config.lang.punctranslate(str1)
str2 = self.filtervariables(str2)
str1 = str1.rstrip()
str2 = str2.rstrip()
if helpers.funcmatch(str1, str2, decoration.puncend, self.config.endpunctuation + u":"):
return True
else:
raise FilterFailure(u"Different punctuation at the end")
@functional
def purepunc(self, str1, str2):
"""Checks that strings that are purely punctuation are not changed."""
# this test is a subset of startandend
if decoration.ispurepunctuation(str1):
success = str1 == str2
else:
success = not decoration.ispurepunctuation(str2)
if success:
return True
else:
raise FilterFailure(u"Consider not translating punctuation")
@cosmetic
def brackets(self, str1, str2):
"""Checks that the number of brackets in both strings match."""
str1 = self.filtervariables(str1)
str2 = self.filtervariables(str2)
messages = []
missing = []
extra = []
for bracket in (u"[", u"]", u"{", u"}", u"(", u")"):
count1 = str1.count(bracket)
count2 = str2.count(bracket)
if count2 < count1:
missing.append(u"'%s'" % bracket)
elif count2 > count1:
extra.append(u"'%s'" % bracket)
if missing:
messages.append(u"Missing %s" % u", ".join(missing))
if extra:
messages.append(u"Added %s" % u", ".join(extra))
if messages:
raise FilterFailure(messages)
return True
@functional
def sentencecount(self, str1, str2):
"""Checks that the number of sentences in both strings match."""
str1 = self.filteraccelerators(str1)
str2 = self.filteraccelerators(str2)
sentences1 = len(self.config.sourcelang.sentences(str1))
sentences2 = len(self.config.lang.sentences(str2))
if sentences1 != sentences2:
raise FilterFailure(u"Different number of sentences: "
u"%d ≠ %d" % (sentences1, sentences2))
return True
@functional
def options(self, str1, str2):
"""Checks that options are not translated."""
str1 = self.filtervariables(str1)
for word1 in str1.split():
if word1 != u"--" and word1.startswith(u"--") and word1[-1].isalnum():
parts = word1.split(u"=")
if parts[0] not in str2:
raise FilterFailure(u"Missing or translated option '%s'" % parts[0])
if len(parts) > 1 and parts[1] in str2:
raise FilterFailure(u"Consider translating parameter "
u"'%(param)s' of option '%(option)s'"
% {"param": parts[1],
"option": parts[0]})
return True
@cosmetic
def startcaps(self, str1, str2):
"""Checks that the message starts with the correct capitalisation."""
str1 = self.filteraccelerators(str1)
str2 = self.filteraccelerators(str2)
if len(str1) > 1 and len(str2) > 1:
if self.config.sourcelang.capsstart(str1) == self.config.lang.capsstart(str2):
return True
elif self.config.sourcelang.numstart(str1) or self.config.lang.numstart(str2):
return True
else:
raise FilterFailure(u"Different capitalization at the start")
if len(str1) == 0 and len(str2) == 0:
return True
if len(str1) == 0 or len(str2) == 0:
raise FilterFailure(u"Different capitalization at the start")
return True
@cosmetic
def simplecaps(self, str1, str2):
"""Checks the capitalisation of two strings isn't wildly different."""
str1 = self.removevariables(str1)
str2 = self.removevariables(str2)
# TODO: review this. The 'I' is specific to English, so it probably
# serves no purpose to get sourcelang.sentenceend
str1 = re.sub(u"[^%s]( I )" % self.config.sourcelang.sentenceend, u" i ", str1)
capitals1 = helpers.filtercount(str1, unicode.isupper)
capitals2 = helpers.filtercount(str2, unicode.isupper)
alpha1 = helpers.filtercount(str1, unicode.isalpha)
alpha2 = helpers.filtercount(str2, unicode.isalpha)
# Capture the all caps case
if capitals1 == alpha1:
if capitals2 == alpha2:
return True
else:
raise FilterFailure(u"Different capitalization")
# some heuristic tests to try and see that the style of capitals is
# vaguely the same
if capitals1 == 0 or capitals1 == 1:
success = capitals2 == capitals1
elif capitals1 < len(str1) / 10:
success = capitals2 <= len(str2) / 8
elif len(str1) < 10:
success = abs(capitals1 - capitals2) < 3
elif capitals1 > len(str1) * 6 / 10:
success = capitals2 > len(str2) * 6 / 10
else:
success = abs(capitals1 - capitals2) < (len(str1) + len(str2)) / 6
if success:
return True
else:
raise FilterFailure(u"Different capitalization")
@functional
def acronyms(self, str1, str2):
"""Checks that acronyms that appear are unchanged."""
acronyms = []
allowed = []
for startmatch, endmatch in self.config.varmatches:
allowed += decoration.getvariables(startmatch, endmatch)(str1)
allowed += self.config.musttranslatewords.keys()
str1 = self.filteraccelerators(self.filtervariables(str1))
word_iterator = self.config.lang.word_iter(str1)
str2 = self.filteraccelerators(self.filtervariables(str2))
#TODO: strip XML? - should provide better error messages
# see mail/chrome/messanger/smime.properties.po
#TODO: consider limiting the word length for recognising acronyms to
#something like 5/6 characters
for word in word_iterator:
if word.isupper() and len(word) > 1 and word not in allowed:
if str2.find(word) == -1:
acronyms.append(word)
if acronyms:
raise FilterFailure(u"Consider not translating acronyms: %s" %
u", ".join(acronyms))
return True
@cosmetic
def doublewords(self, str1, str2):
"""Checks for repeated words in the translation."""
lastword = ""
without_newlines = "\n".join(str2.split("\n"))
words = self.filteraccelerators(self.removevariables(self.filterxml(without_newlines))).replace(u".", u"").lower().split()
for word in words:
if word == lastword and word not in self.config.lang.validdoublewords:
raise FilterFailure(u"The word '%s' is repeated" % word)
lastword = word
return True
@functional
def notranslatewords(self, str1, str2):
"""Checks that words configured as untranslatable appear in the
translation too."""
if not self.config.notranslatewords:
return True
str1 = self.filtervariables(str1)
str2 = self.filtervariables(str2)
#The above is full of strange quotes and things in utf-8 encoding.
#single apostrophe perhaps problematic in words like "doesn't"
for separator in self.config.punctuation:
str1 = str1.replace(separator, u" ")
str2 = str2.replace(separator, u" ")
words1 = self.filteraccelerators(str1).split()
words2 = self.filteraccelerators(str2).split()
stopwords = [word for word in words1 if word in self.config.notranslatewords and word not in words2]
if stopwords:
raise FilterFailure(u"Do not translate: %s" %
(u", ".join(stopwords)))
return True
@functional
def musttranslatewords(self, str1, str2):
"""Checks that words configured as definitely translatable don't appear
in the translation."""
if not self.config.musttranslatewords:
return True
str1 = self.removevariables(str1)
str2 = self.removevariables(str2)
# The above is full of strange quotes and things in utf-8 encoding.
# single apostrophe perhaps problematic in words like "doesn't"
for separator in self.config.punctuation:
str1 = str1.replace(separator, u" ")
str2 = str2.replace(separator, u" ")
words1 = self.filteraccelerators(str1).split()
words2 = self.filteraccelerators(str2).split()
stopwords = [word for word in words1 if word.lower() in self.config.musttranslatewords and word in words2]
if stopwords:
raise FilterFailure(u"Please translate: %s" % (u", ".join(stopwords)))
return True
@cosmetic
def validchars(self, str1, str2):
"""Checks that only characters specified as valid appear in the
translation.
"""
if not self.config.validcharsmap:
return True
invalid1 = str1.translate(self.config.validcharsmap)
invalid2 = str2.translate(self.config.validcharsmap)
invalidchars = [u"'%s' (\\u%04x)" % (invalidchar, ord(invalidchar)) for invalidchar in invalid2 if invalidchar not in invalid1]
if invalidchars:
raise FilterFailure(u"Invalid characters: %s" % (u", ".join(invalidchars)))
return True
@functional
def filepaths(self, str1, str2):
"""Checks that file paths have not been translated."""
for word1 in self.filteraccelerators(self.filterxml(str1)).split():
if word1.startswith(u"/"):
if not helpers.countsmatch(str1, str2, (word1,)):
raise FilterFailure(u"Different file paths")
return True
@critical
def xmltags(self, str1, str2):
"""Checks that XML/HTML tags have not been translated."""
tags1 = tag_re.findall(str1)
if len(tags1) > 0:
if (len(tags1[0]) == len(str1)) and u"=" not in tags1[0]:
return True
tags2 = tag_re.findall(str2)
properties1 = tagproperties(tags1, self.config.ignoretags)
properties2 = tagproperties(tags2, self.config.ignoretags)
filtered1 = []
filtered2 = []
for property1 in properties1:
filtered1 += [intuplelist(property1, self.config.canchangetags)]
for property2 in properties2:
filtered2 += [intuplelist(property2, self.config.canchangetags)]
# TODO: consider the consequences of different ordering of
# attributes/tags
if filtered1 != filtered2:
raise FilterFailure(u"Different XML tags")
else:
# No tags in str1, let's just check that none were added in str2.
# This might be useful for fuzzy strings wrongly unfuzzied.
tags2 = tag_re.findall(str2)
if len(tags2) > 0:
raise FilterFailure(u"Added XML tags")
return True
@functional
def kdecomments(self, str1, str2):
"""Checks to ensure that no KDE style comments appear in the
translation.
"""
return str2.find(u"\n_:") == -1 and not str2.startswith(u"_:")
@extraction
def compendiumconflicts(self, str1, str2):
"""Checks for Gettext compendium conflicts (#-#-#-#-#)."""
return str2.find(u"#-#-#-#-#") == -1
@cosmetic
def simpleplurals(self, str1, str2):
"""Checks for English style plural(s) for you to review."""
def numberofpatterns(string, patterns):
number = 0
for pattern in patterns:
number += len(re.findall(pattern, string))
return number
sourcepatterns = ["\(s\)"]
targetpatterns = ["\(s\)"]
sourcecount = numberofpatterns(str1, sourcepatterns)
targetcount = numberofpatterns(str2, targetpatterns)
if self.config.lang.nplurals == 1:
if targetcount:
raise FilterFailure(u"Plural(s) were kept in translation")
else:
return True
if sourcecount == targetcount:
return True
else:
raise FilterFailure(u"The original uses plural(s)")
@functional
def spellcheck(self, str1, str2):
"""Checks words that don't pass a spell check."""
if not self.config.targetlanguage:
return True
if not spelling.available:
return True
# TODO: filterxml?
str1 = self.filteraccelerators_by_list(self.removevariables(str1),
self.config.sourcelang.validaccel)
str2 = self.filteraccelerators_by_list(self.removevariables(str2),
self.config.lang.validaccel)
errors = set()
# We cache spelling results of source texts:
ignore1 = self.source_spell_cache.get(str1, None)
if ignore1 is None:
ignore1 = set(spelling.simple_check(str1, lang=self.config.sourcelang.code))
self.source_spell_cache[str1] = ignore1
# We cache spelling results of target texts sentence-by-sentence. This
# way we can reuse most of the results while someone is typing a long
# segment in Virtaal.
sentences2 = self.config.lang.sentences(str2)
for sentence in sentences2:
sentence_errors = self.target_spell_cache.get(sentence, None)
if sentence_errors is None:
sentence_errors = spelling.simple_check(sentence, lang=self.config.targetlanguage)
self.target_spell_cache[sentence] = sentence_errors
errors.update(sentence_errors)
errors.difference_update(ignore1, self.config.notranslatewords)
if errors:
messages = [u"Check the spelling of: %s" % u", ".join(errors)]
raise FilterFailure(messages)
return True
@extraction
def credits(self, str1, str2):
"""Checks for messages containing translation credits instead of
normal translations.
"""
if str1 in self.config.credit_sources:
raise FilterFailure(u"Don't translate. Just credit the translators.")
else:
return True
# If the precondition filter is run and fails then the other tests listed are ignored
preconditions = {
"untranslated": ("simplecaps", "variables", "startcaps",
"accelerators", "brackets", "endpunc",
"acronyms", "xmltags", "startpunc",
"endwhitespace", "startwhitespace",
"escapes", "doublequoting", "singlequoting",
"filepaths", "purepunc", "doublespacing",
"sentencecount", "numbers", "isfuzzy",
"isreview", "notranslatewords", "musttranslatewords",
"emails", "simpleplurals", "urls", "printf",
"tabs", "newlines", "functions", "options",
"blank", "nplurals", "gconf", "dialogsizes"),
"blank": ("simplecaps", "variables", "startcaps",
"accelerators", "brackets", "endpunc",
"acronyms", "xmltags", "startpunc",
"endwhitespace", "startwhitespace",
"escapes", "doublequoting", "singlequoting",
"filepaths", "purepunc", "doublespacing",
"sentencecount", "numbers", "isfuzzy",
"isreview", "notranslatewords", "musttranslatewords",
"emails", "simpleplurals", "urls", "printf",
"tabs", "newlines", "functions", "options",
"gconf", "dialogsizes"),
"credits": ("simplecaps", "variables", "startcaps",
"accelerators", "brackets", "endpunc",
"acronyms", "xmltags", "startpunc",
"escapes", "doublequoting", "singlequoting",
"filepaths", "doublespacing",
"sentencecount", "numbers",
"emails", "simpleplurals", "urls", "printf",
"tabs", "newlines", "functions", "options"),
"purepunc": ("startcaps", "options"),
# This is causing some problems since Python 2.6, as
# startcaps is now seen as an important one to always execute
# and could now be done before it is blocked by a failing
# "untranslated" or "blank" test. This is probably happening
# due to slightly different implementation of the internal
# dict handling since Python 2.6. We should never have relied
# on this ordering anyway.
#"startcaps": ("simplecaps",),
"endwhitespace": ("endpunc",),
"startwhitespace": ("startpunc",),
"unchanged": ("doublewords",),
"compendiumconflicts": ("accelerators", "brackets", "escapes",
"numbers", "startpunc", "long", "variables",
"startcaps", "sentencecount", "simplecaps",
"doublespacing", "endpunc", "xmltags",
"startwhitespace", "endwhitespace",
"singlequoting", "doublequoting",
"filepaths", "purepunc", "doublewords", "printf",
"newlines"),
}
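# Illustrative note (not part of the upstream code): if the "untranslated"
# precondition fails for a unit (non-empty source, empty target),
# run_filters() adds every test listed above under "untranslated" to its
# ignore list, so e.g. "endpunc" or "xmltags" are never reported for that
# unit; only the precondition failure itself appears in the results.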
# code to actually run the tests (use unittest?)
openofficeconfig = CheckerConfig(
accelmarkers=["~"],
varmatches=[("&", ";"), ("%", "%"), ("%", None), ("%", 0), ("$(", ")"),
("$", "$"), ("${", "}"), ("#", "#"), ("#", 1), ("#", 0),
("($", ")"), ("$[", "]"), ("[", "]"), ("@", "@"),
("$", None)],
ignoretags=[("alt", "xml-lang", None), ("ahelp", "visibility", "visible"),
("img", "width", None), ("img", "height", None)],
canchangetags=[("link", "name", None)],
)
class OpenOfficeChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(openofficeconfig)
StandardChecker.__init__(self, **kwargs)
mozillaconfig = CheckerConfig(
accelmarkers=["&"],
varmatches=[("&", ";"), ("%", "%"), ("%", 1), ("$", "$"), ("$", None),
("#", 1), ("${", "}"), ("$(^", ")"), ("{{", "}}"), ],
criticaltests=["accelerators"],
)
class MozillaChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(mozillaconfig)
StandardChecker.__init__(self, **kwargs)
@extraction
def credits(self, str1, str2):
"""Checks for messages containing translation credits instead of
normal translations.
"""
for location in self.locations:
if location in ['MOZ_LANGPACK_CONTRIBUTORS', 'credit.translation']:
raise FilterFailure(u"Don't translate. Just credit the translators.")
return True
mozilla_dialog_re = re.compile("""( # option pair "key: value;"
(?P<key>[-a-z]+) # key
:\s+ # separator
(?P<number>\d+(?:[.]\d+)?) # number
(?P<unit>[a-z][a-z]);? # units
)+ # multiple pairs
""", re.VERBOSE)
mozilla_dialog_valid_units = ['em', 'px', 'ch']
@critical
def dialogsizes(self, str1, str2):
"""Checks that dialog sizes are not translated."""
# Example: "width: 635px; height: 400px;"
if "width" in str1 or "height" in str1:
str1pairs = self.mozilla_dialog_re.findall(str1)
if str1pairs:
str2pairs = self.mozilla_dialog_re.findall(str2)
if len(str1pairs) != len(str2pairs):
raise FilterFailure(u"A dialog pair is missing")
for i, pair1 in enumerate(str1pairs):
pair2 = str2pairs[i]
if pair1[0] != pair2[0]: # Only check pairs that differ
if len(pair2) != 4:
raise FilterFailure(u"A part of the dialog pair is missing")
if pair1[1] not in pair2: # key
raise FilterFailure(u"Do not translate the key '%s'" % pair1[1])
# FIXME we could check more carefully for numbers in pair1[2]
if pair2[3] not in self.mozilla_dialog_valid_units:
raise FilterFailure(u"Units should be one of '%s'. "
"The source string uses '%s'" % (", ".join(self.mozilla_dialog_valid_units), pair1[3]))
return True
@functional
def numbers(self, str1, str2):
"""Checks that numbers are not translated.
Special handling for Mozilla to ignore entries that are dialog sizes.
"""
if self.mozilla_dialog_re.findall(str1):
return True
return super(MozillaChecker, self).numbers(str1, str2)
@functional
def unchanged(self, str1, str2):
"""Checks whether a translation is basically identical to the original
string.
Special handling for Mozilla to ignore entries that are dialog sizes.
"""
if (self.mozilla_dialog_re.findall(str1) or
str1.strip().lstrip('0123456789') in self.mozilla_dialog_valid_units):
return True
return super(MozillaChecker, self).unchanged(str1, str2)
@cosmetic
def accelerators(self, str1, str2):
"""Checks whether accelerators are consistent between the
two strings.
For Mozilla we lower the severity to cosmetic.
"""
return super(MozillaChecker, self).accelerators(str1, str2)
drupalconfig = CheckerConfig(
varmatches=[("%", None), ("@", None), ("!", None)],
)
class DrupalChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(drupalconfig)
StandardChecker.__init__(self, **kwargs)
gnomeconfig = CheckerConfig(
accelmarkers=["_"],
varmatches=[("%", 1), ("$(", ")")],
credit_sources=[u"translator-credits"],
)
class GnomeChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(gnomeconfig)
StandardChecker.__init__(self, **kwargs)
@functional
def gconf(self, str1, str2):
"""Checks if we have any gconf config settings translated."""
for location in self.locations:
if location.find('schemas.in') != -1 or location.find('gschema.xml.in') != -1:
gconf_attributes = gconf_attribute_re.findall(str1)
#stopwords = [word for word in words1 if word in self.config.notranslatewords and word not in words2]
stopwords = [word for word in gconf_attributes if word[1:-1] not in str2]
if stopwords:
raise FilterFailure(u"Do not translate GConf attributes: %s" %
(u", ".join(stopwords)))
return True
return True
kdeconfig = CheckerConfig(
accelmarkers=["&"],
varmatches=[("%", 1)],
credit_sources=[u"Your names", u"Your emails", u"ROLES_OF_TRANSLATORS"],
)
class KdeChecker(StandardChecker):
def __init__(self, **kwargs):
# TODO allow setup of KDE plural and translator comments so that they do
# not create false positives
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(kdeconfig)
StandardChecker.__init__(self, **kwargs)
cclicenseconfig = CheckerConfig(varmatches=[("@", "@")])
class CCLicenseChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(cclicenseconfig)
StandardChecker.__init__(self, **kwargs)
termconfig = CheckerConfig()
class TermChecker(StandardChecker):
def __init__(self, **kwargs):
checkerconfig = kwargs.get("checkerconfig", None)
if checkerconfig is None:
checkerconfig = CheckerConfig()
kwargs["checkerconfig"] = checkerconfig
checkerconfig.update(termconfig)
StandardChecker.__init__(self, **kwargs)
projectcheckers = {
"openoffice": OpenOfficeChecker,
"mozilla": MozillaChecker,
"kde": KdeChecker,
"wx": KdeChecker,
"gnome": GnomeChecker,
"creativecommons": CCLicenseChecker,
"drupal": DrupalChecker,
"terminology": TermChecker,
}
class StandardUnitChecker(UnitChecker):
"""The standard checks for common checks on translation units."""
@extraction
def isfuzzy(self, unit):
"""Check if the unit has been marked fuzzy."""
return not unit.isfuzzy()
@extraction
def isreview(self, unit):
"""Check if the unit has been marked review."""
return not unit.isreview()
@critical
def nplurals(self, unit):
"""Checks for the correct number of noun forms for plural
translations.
"""
if unit.hasplural():
# if we don't have a valid nplurals value, don't run the test
nplurals = self.config.lang.nplurals
if nplurals > 0:
return len(filter(None, unit.target.strings)) == nplurals
return True
@extraction
def hassuggestion(self, unit):
"""Checks if there is at least one suggested translation for this
unit.
"""
self.suggestion_store = getattr(self, 'suggestion_store', None)
suggestions = []
if self.suggestion_store:
suggestions = self.suggestion_store.findunits(unit.source)
elif getattr(unit, "getalttrans", None):
# TODO: we probably want to filter them somehow
suggestions = unit.getalttrans()
return not bool(suggestions)
def runtests(str1, str2, ignorelist=()):
"""Verifies that the tests pass for a pair of strings."""
from translate.storage import base
str1 = data.normalized_unicode(str1)
str2 = data.normalized_unicode(str2)
unit = base.TranslationUnit(str1)
unit.target = str2
checker = StandardChecker(excludefilters=ignorelist)
failures = checker.run_filters(unit)
for test in failures:
print "failure: %s: %s\n %r\n %r" % \
(test, failures[test], str1, str2)
return failures
def batchruntests(pairs):
"""Runs test on a batch of string pairs."""
passed, numpairs = 0, len(pairs)
for str1, str2 in pairs:
if runtests(str1, str2):
passed += 1
print
print "total: %d/%d pairs passed" % (passed, numpairs)
if __name__ == '__main__':
testset = [(r"simple", r"somple"),
(r"\this equals \that", r"does \this equal \that?"),
(r"this \'equals\' that", r"this 'equals' that"),
(r" start and end! they must match.",
r"start and end! they must match."),
(r"check for matching %variables marked like %this",
r"%this %variable is marked"),
(r"check for mismatching %variables marked like %this",
r"%that %variable is marked"),
(r"check for mismatching %variables% too",
r"how many %variable% are marked"),
(r"%% %%", r"%%"),
(r"Row: %1, Column: %2", r"Mothalo: %1, Kholomo: %2"),
(r"simple lowercase", r"it is all lowercase"),
(r"simple lowercase", r"It Is All Lowercase"),
(r"Simple First Letter Capitals", r"First Letters"),
(r"SIMPLE CAPITALS", r"First Letters"),
(r"SIMPLE CAPITALS", r"ALL CAPITALS"),
(r"forgot to translate", r" "),
]
batchruntests(testset)
| bsd-3-clause | -1,620,984,560,167,256,800 | 35.039707 | 136 | 0.586271 | false |
TUW-GEO/OGRSpatialRef3D | gdal-1.10.0/swig/python/scripts/gdal_retile.py | 1 | 30866 | #!/usr/bin/env python
###############################################################################
# $Id: gdal_retile.py 24037 2012-02-28 17:21:35Z rouault $
#
# Purpose: Module for retiling (merging) tiles and building tiled pyramids
# Author: Christian Meuller, [email protected]
# UseDirForEachRow support by Chris Giesey & Elijah Robison
#
###############################################################################
# Copyright (c) 2007, Christian Mueller
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo.gdalconst import *
except:
import gdal
import ogr
import osr
from gdalconst import *
import sys
import os
import math
class AffineTransformDecorator:
""" A class providing some usefull methods for affine Transformations """
def __init__(self, transform ):
self.geotransform=transform
self.scaleX=self.geotransform[1]
self.scaleY=self.geotransform[5]
if self.scaleY > 0:
self.scaleY*=-1
self.ulx = self.geotransform[0]
self.uly = self.geotransform[3]
def pointsFor(self,width,height):
xlist=[]
ylist=[]
w = self.scaleX * width
h = self.scaleY * height
xlist.append(self.ulx)
ylist.append(self.uly)
xlist.append(self.ulx+w)
ylist.append(self.uly)
xlist.append(self.ulx+w)
ylist.append(self.uly+h)
xlist.append(self.ulx)
ylist.append(self.uly+h)
return [ xlist, ylist]
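# Illustrative sketch (not part of the upstream script): for the GDAL
# geotransform [100.0, 10.0, 0, 200.0, 0, -10.0] (origin at 100/200,
# 10-unit pixels), pointsFor(2, 3) yields the corner coordinates of a
# 2x3 pixel window.
def _example_affine_points():
    dec = AffineTransformDecorator([100.0, 10.0, 0, 200.0, 0, -10.0])
    xlist, ylist = dec.pointsFor(2, 3)
    assert xlist == [100.0, 120.0, 120.0, 100.0]
    assert ylist == [200.0, 200.0, 170.0, 170.0]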
class DataSetCache:
""" A class for caching source tiles """
def __init__(self ):
self.cacheSize=8
self.queue=[]
self.dict={}
def get(self,name ):
if name in self.dict:
return self.dict[name]
result = gdal.Open(name)
if result is None:
print("Error openenig:%s" % NameError)
sys.exit(1)
if len(self.queue)==self.cacheSize:
toRemove = self.queue.pop(0)
del self.dict[toRemove]
self.queue.append(name)
self.dict[name]=result
return result
def __del__(self):
for name, dataset in self.dict.items():
del dataset
del self.queue
del self.dict
class tile_info:
""" A class holding info how to tile """
def __init__(self,xsize,ysize,tileWidth,tileHeight):
self.tileWidth=tileWidth
self.tileHeight=tileHeight
self.countTilesX= int(xsize / tileWidth)
self.countTilesY= int(ysize / tileHeight)
self.lastTileWidth = int(xsize - self.countTilesX * tileWidth)
self.lastTileHeight = int(ysize - self.countTilesY * tileHeight)
if (self.lastTileWidth > 0 ):
self.countTilesX=self.countTilesX+1
else:
self.lastTileWidth=tileWidth
if (self.lastTileHeight > 0 ):
self.countTilesY=self.countTilesY+1
else:
self.lastTileHeight=tileHeight
def report( self ):
print('tileWidth %d' % self.tileWidth)
print('tileHeight %d' % self.tileHeight)
print('countTilesX: %d' % self.countTilesX)
print('countTilesY: %d' % self.countTilesY)
print('lastTileWidth: %d' % self.lastTileWidth)
print('lastTileHeight: %d' % self.lastTileHeight)
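# Worked example (illustrative, not part of the upstream script): a 1000x600
# raster cut into 256x256 tiles leaves smaller tiles in the rightmost column
# and the bottom row.
def _example_tile_info():
    ti = tile_info(1000, 600, 256, 256)
    assert ti.countTilesX == 4 and ti.lastTileWidth == 232
    assert ti.countTilesY == 3 and ti.lastTileHeight == 88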
class mosaic_info:
"""A class holding information about a GDAL file or a GDAL fileset"""
def __init__(self, filename,inputDS ):
"""
Initialize mosaic_info from filename
filename -- Name of file to read.
"""
self.TempDriver=gdal.GetDriverByName("MEM")
self.filename = filename
self.cache = DataSetCache()
self.ogrTileIndexDS = inputDS
self.ogrTileIndexDS.GetLayer().ResetReading()
feature = self.ogrTileIndexDS.GetLayer().GetNextFeature()
imgLocation = feature.GetField(0)
fhInputTile = self.cache.get(imgLocation)
self.bands = fhInputTile.RasterCount
self.band_type = fhInputTile.GetRasterBand(1).DataType
self.projection = fhInputTile.GetProjection()
dec = AffineTransformDecorator(fhInputTile.GetGeoTransform())
self.scaleX=dec.scaleX
self.scaleY=dec.scaleY
ct = fhInputTile.GetRasterBand(1).GetRasterColorTable()
if ct is not None:
self.ct = ct.Clone()
else:
self.ct = None
self.ci = [0] * self.bands
for iband in range(self.bands):
self.ci[iband] = fhInputTile.GetRasterBand(iband + 1).GetRasterColorInterpretation()
extent = self.ogrTileIndexDS.GetLayer().GetExtent()
self.ulx = extent[0]
self.uly = extent[3]
self.lrx = extent[1]
self.lry = extent[2]
self.xsize = int(round((self.lrx-self.ulx) / self.scaleX))
self.ysize = abs(int(round((self.uly-self.lry) / self.scaleY)))
def __del__(self):
del self.cache
del self.ogrTileIndexDS
def getDataSet(self,minx,miny,maxx,maxy):
self.ogrTileIndexDS.GetLayer().ResetReading()
self.ogrTileIndexDS.GetLayer().SetSpatialFilterRect(minx,miny,maxx,maxy)
features = []
envelope = None
while True:
feature = self.ogrTileIndexDS.GetLayer().GetNextFeature()
if feature is None:
break
features.append(feature)
if envelope is None:
envelope=feature.GetGeometryRef().GetEnvelope()
else:
featureEnv = feature.GetGeometryRef().GetEnvelope()
envelope= ( min(featureEnv[0],envelope[0]),max(featureEnv[1],envelope[1]),
min(featureEnv[2],envelope[2]),max(featureEnv[3],envelope[3]))
if envelope is None:
return None
# enlarge to query rect if necessary
envelope= ( min(minx,envelope[0]),max(maxx,envelope[1]),
min(miny,envelope[2]),max(maxy,envelope[3]))
self.ogrTileIndexDS.GetLayer().SetSpatialFilter(None)
# merge tiles
resultSizeX =int(math.ceil(((maxx-minx) / self.scaleX )))
resultSizeY =int(math.ceil(((miny-maxy) / self.scaleY )))
resultDS = self.TempDriver.Create( "TEMP", resultSizeX, resultSizeY, self.bands,self.band_type,[])
resultDS.SetGeoTransform( [minx,self.scaleX,0,maxy,0,self.scaleY] )
for feature in features:
featureName = feature.GetField(0)
sourceDS=self.cache.get(featureName)
dec = AffineTransformDecorator(sourceDS.GetGeoTransform())
#calculate read and write offsets
readOffsetX =int(round((minx-dec.ulx) / self.scaleX))
readOffsetY =int(round((maxy-dec.uly) / self.scaleY))
writeOffsetX=0
if readOffsetX<0:
writeOffsetX = readOffsetX * -1
readOffsetX=0
writeOffsetY=0
if readOffsetY<0:
writeOffsetY = readOffsetY * -1
readOffsetY=0
#calculate read and write dimensions
readX=min(resultSizeX,sourceDS.RasterXSize-readOffsetX,resultSizeX-writeOffsetX)
if readX<=0:
continue
readY=min(resultSizeY,sourceDS.RasterYSize-readOffsetY,resultSizeY-writeOffsetY)
if readY<=0:
continue
# print "READ",readOffsetX,readOffsetY,readX,readY
for bandNr in range(1,self.bands+1):
s_band = sourceDS.GetRasterBand( bandNr )
t_band = resultDS.GetRasterBand( bandNr )
if self.ct is not None:
t_band.SetRasterColorTable(self.ct)
t_band.SetRasterColorInterpretation(self.ci[bandNr-1])
data = s_band.ReadRaster( readOffsetX,readOffsetY,readX,readY, readX,readY, self.band_type )
t_band.WriteRaster(writeOffsetX,writeOffsetY,readX,readY,data )
return resultDS
def closeDataSet(self, memDS):
del memDS
#self.TempDriver.Delete("TEMP")
def report( self ):
print('Filename: '+ self.filename)
print('File Size: %dx%dx%d' \
% (self.xsize, self.ysize, self.bands))
print('Pixel Size: %f x %f' \
% (self.scaleX,self.scaleY))
print('UL:(%f,%f) LR:(%f,%f)' \
% (self.ulx,self.uly,self.lrx,self.lry))
def getTileIndexFromFiles( inputTiles, driverTyp):
if Verbose:
from sys import version_info
if version_info >= (3,0,0):
exec('print("Building internal Index for %d tile(s) ..." % len(inputTiles), end=" ")')
else:
exec('print "Building internal Index for %d tile(s) ..." % len(inputTiles), ')
ogrTileIndexDS = createTileIndex("TileIndex",TileIndexFieldName,None,driverTyp);
for inputTile in inputTiles:
fhInputTile = gdal.Open(inputTile)
if fhInputTile is None:
return None
dec = AffineTransformDecorator(fhInputTile.GetGeoTransform())
points = dec.pointsFor(fhInputTile.RasterXSize, fhInputTile.RasterYSize)
addFeature(ogrTileIndexDS,inputTile,points[0],points[1])
del fhInputTile
if Verbose:
print("finished")
#ogrTileIndexDS.GetLayer().SyncToDisk()
return ogrTileIndexDS
def getTargetDir (level = -1):
if level==-1:
return TargetDir
else:
return TargetDir+str(level)+os.sep
def tileImage(minfo, ti ):
"""
Tile image in mosaicinfo minfo based on tileinfo ti
returns the OGR datasource holding the tile index of the created tiles
"""
global LastRowIndx
LastRowIndx=-1
OGRDS=createTileIndex("TileResult_0", TileIndexFieldName, Source_SRS,TileIndexDriverTyp)
yRange = list(range(1,ti.countTilesY+1))
xRange = list(range(1,ti.countTilesX+1))
for yIndex in yRange:
for xIndex in xRange:
offsetY=(yIndex-1)* ti.tileHeight
offsetX=(xIndex-1)* ti.tileWidth
if yIndex==ti.countTilesY:
height=ti.lastTileHeight
else:
height=ti.tileHeight
if xIndex==ti.countTilesX:
width=ti.lastTileWidth
else:
width=ti.tileWidth
if UseDirForEachRow :
tilename=getTileName(minfo,ti, xIndex, yIndex,0)
else:
tilename=getTileName(minfo,ti, xIndex, yIndex)
createTile(minfo, offsetX, offsetY, width, height,tilename,OGRDS)
if TileIndexName is not None:
if UseDirForEachRow and not PyramidOnly:
shapeName=getTargetDir(0)+TileIndexName
else:
shapeName=getTargetDir()+TileIndexName
copyTileIndexToDisk(OGRDS,shapeName)
if CsvFileName is not None:
if UseDirForEachRow and not PyramidOnly:
csvName=getTargetDir(0)+CsvFileName
else:
csvName=getTargetDir()+CsvFileName
copyTileIndexToCSV(OGRDS,csvName)
return OGRDS
def copyTileIndexToDisk(OGRDS, fileName):
SHAPEDS = createTileIndex(fileName, TileIndexFieldName, OGRDS.GetLayer().GetSpatialRef(), "ESRI Shapefile")
OGRDS.GetLayer().ResetReading()
while True:
feature = OGRDS.GetLayer().GetNextFeature()
if feature is None:
break
newFeature = feature.Clone()
basename = os.path.basename(feature.GetField(0))
if UseDirForEachRow :
t = os.path.split(os.path.dirname(feature.GetField(0)))
basename = t[1]+"/"+basename
newFeature.SetField(0,basename)
SHAPEDS.GetLayer().CreateFeature(newFeature)
closeTileIndex(SHAPEDS)
def copyTileIndexToCSV(OGRDS, fileName):
csvfile = open(fileName, 'w')
OGRDS.GetLayer().ResetReading()
while True:
feature = OGRDS.GetLayer().GetNextFeature()
if feature is None:
break
basename = os.path.basename(feature.GetField(0))
if UseDirForEachRow :
t = os.path.split(os.path.dirname(feature.GetField(0)))
basename = t[1]+"/"+basename
csvfile.write(basename);
geom = feature.GetGeometryRef()
coords = geom.GetEnvelope();
for i in range(len(coords)):
csvfile.write(CsvDelimiter)
csvfile.write("%f" % coords[i])
csvfile.write("\n");
csvfile.close()
def createPyramidTile(levelMosaicInfo, offsetX, offsetY, width, height,tileName,OGRDS):
sx= levelMosaicInfo.scaleX*2
sy= levelMosaicInfo.scaleY*2
dec = AffineTransformDecorator([levelMosaicInfo.ulx+offsetX*sx,sx,0,
levelMosaicInfo.uly+offsetY*sy,0,sy])
s_fh = levelMosaicInfo.getDataSet(dec.ulx,dec.uly+height*dec.scaleY,
dec.ulx+width*dec.scaleX,dec.uly)
if s_fh is None:
return
if OGRDS is not None:
points = dec.pointsFor(width, height)
addFeature(OGRDS, tileName, points[0], points[1])
if BandType is None:
bt=levelMosaicInfo.band_type
else:
bt=BandType
geotransform = [dec.ulx, dec.scaleX, 0,dec.uly,0,dec.scaleY]
bands = levelMosaicInfo.bands
if MemDriver is None:
t_fh = Driver.Create( tileName, width, height, bands,bt,CreateOptions)
else:
t_fh = MemDriver.Create( tileName, width, height, bands,bt)
if t_fh is None:
        print('Creation failed, terminating gdal_retile.')
sys.exit( 1 )
t_fh.SetGeoTransform( geotransform )
t_fh.SetProjection( levelMosaicInfo.projection)
for band in range(1,bands+1):
t_band = t_fh.GetRasterBand( band )
if levelMosaicInfo.ct is not None:
t_band.SetRasterColorTable(levelMosaicInfo.ct)
t_band.SetRasterColorInterpretation(levelMosaicInfo.ci[band-1])
res = gdal.ReprojectImage(s_fh,t_fh,None,None,ResamplingMethod)
if res!=0:
print("Reprojection failed for %s, error %d" % (tileName,res))
sys.exit( 1 )
levelMosaicInfo.closeDataSet(s_fh);
if MemDriver is not None:
tt_fh = Driver.CreateCopy( tileName, t_fh, 0, CreateOptions )
if Verbose:
print(tileName + " : " + str(offsetX)+"|"+str(offsetY)+"-->"+str(width)+"-"+str(height))
def createTile( minfo, offsetX,offsetY,width,height, tilename,OGRDS):
"""
Create tile
return name of created tile
"""
if BandType is None:
bt=minfo.band_type
else:
bt=BandType
dec = AffineTransformDecorator([minfo.ulx,minfo.scaleX,0,minfo.uly,0,minfo.scaleY])
s_fh = minfo.getDataSet(dec.ulx+offsetX*dec.scaleX,dec.uly+offsetY*dec.scaleY+height*dec.scaleY,
dec.ulx+offsetX*dec.scaleX+width*dec.scaleX,
dec.uly+offsetY*dec.scaleY)
if s_fh is None:
return;
geotransform = [dec.ulx+offsetX*dec.scaleX, dec.scaleX, 0,
dec.uly+offsetY*dec.scaleY, 0,dec.scaleY]
if OGRDS is not None:
dec2 = AffineTransformDecorator(geotransform)
points = dec2.pointsFor(width, height)
addFeature(OGRDS, tilename, points[0], points[1])
bands = minfo.bands
if MemDriver is None:
t_fh = Driver.Create( tilename, width, height, bands,bt,CreateOptions)
else:
t_fh = MemDriver.Create( tilename, width, height, bands,bt)
if t_fh is None:
        print('Creation failed, terminating gdal_retile.')
sys.exit( 1 )
t_fh.SetGeoTransform( geotransform )
if Source_SRS is not None:
t_fh.SetProjection( Source_SRS.ExportToWkt())
readX=min(s_fh.RasterXSize,width)
readY=min(s_fh.RasterYSize,height)
for band in range(1,bands+1):
s_band = s_fh.GetRasterBand( band )
t_band = t_fh.GetRasterBand( band )
if minfo.ct is not None:
t_band.SetRasterColorTable(minfo.ct)
# data = s_band.ReadRaster( offsetX,offsetY,width,height,width,height, t_band.DataType )
data = s_band.ReadRaster( 0,0,readX,readY,readX,readY, t_band.DataType )
t_band.WriteRaster( 0,0,readX,readY, data,readX,readY, t_band.DataType )
minfo.closeDataSet(s_fh);
if MemDriver is not None:
tt_fh = Driver.CreateCopy( tilename, t_fh, 0, CreateOptions )
if Verbose:
print(tilename + " : " + str(offsetX)+"|"+str(offsetY)+"-->"+str(width)+"-"+str(height))
def createTileIndex(dsName,fieldName,srs,driverName):
OGRDriver = ogr.GetDriverByName(driverName);
if OGRDriver is None:
        print('%s driver not found' % driverName)
sys.exit( 1 )
OGRDataSource=OGRDriver.Open(dsName)
if OGRDataSource is not None:
OGRDataSource.Destroy()
OGRDriver.DeleteDataSource(dsName)
if Verbose:
print('truncating index '+ dsName)
OGRDataSource=OGRDriver.CreateDataSource(dsName)
if OGRDataSource is None:
print('Could not open datasource '+dsName)
sys.exit( 1 )
OGRLayer = OGRDataSource.CreateLayer("index", srs, ogr.wkbPolygon)
if OGRLayer is None:
print('Could not create Layer')
sys.exit( 1 )
OGRFieldDefn = ogr.FieldDefn(fieldName,ogr.OFTString)
if OGRFieldDefn is None:
print('Could not create FieldDefn for '+fieldName)
sys.exit( 1 )
OGRFieldDefn.SetWidth(256)
if OGRLayer.CreateField(OGRFieldDefn) != 0:
print('Could not create Field for '+fieldName)
sys.exit( 1 )
return OGRDataSource
def addFeature(OGRDataSource,location,xlist,ylist):
OGRLayer=OGRDataSource.GetLayer();
OGRFeature = ogr.Feature(OGRLayer.GetLayerDefn())
if OGRFeature is None:
print('Could not create Feature')
sys.exit( 1 )
OGRFeature.SetField(TileIndexFieldName,location);
wkt = 'POLYGON ((%f %f,%f %f,%f %f,%f %f,%f %f ))' % (xlist[0],ylist[0],
xlist[1],ylist[1],xlist[2],ylist[2],xlist[3],ylist[3],xlist[0],ylist[0])
OGRGeometry=ogr.CreateGeometryFromWkt(wkt,OGRLayer.GetSpatialRef())
if (OGRGeometry is None):
print('Could not create Geometry')
sys.exit( 1 )
OGRFeature.SetGeometryDirectly(OGRGeometry)
OGRLayer.CreateFeature(OGRFeature)
OGRFeature.Destroy()
def closeTileIndex(OGRDataSource):
OGRDataSource.Destroy()
def buildPyramid(minfo,createdTileIndexDS,tileWidth, tileHeight):
global LastRowIndx
inputDS=createdTileIndexDS
for level in range(1,Levels+1):
LastRowIndx = -1
levelMosaicInfo = mosaic_info(minfo.filename,inputDS)
levelOutputTileInfo = tile_info(levelMosaicInfo.xsize/2,levelMosaicInfo.ysize/2,tileWidth,tileHeight)
inputDS=buildPyramidLevel(levelMosaicInfo,levelOutputTileInfo,level)
def buildPyramidLevel(levelMosaicInfo,levelOutputTileInfo, level):
yRange = list(range(1,levelOutputTileInfo.countTilesY+1))
xRange = list(range(1,levelOutputTileInfo.countTilesX+1))
OGRDS=createTileIndex("TileResult_"+str(level), TileIndexFieldName, Source_SRS,TileIndexDriverTyp)
for yIndex in yRange:
for xIndex in xRange:
offsetY=(yIndex-1)* levelOutputTileInfo.tileHeight
offsetX=(xIndex-1)* levelOutputTileInfo.tileWidth
if yIndex==levelOutputTileInfo.countTilesY:
height=levelOutputTileInfo.lastTileHeight
else:
height=levelOutputTileInfo.tileHeight
if xIndex==levelOutputTileInfo.countTilesX:
width=levelOutputTileInfo.lastTileWidth
else:
width=levelOutputTileInfo.tileWidth
tilename=getTileName(levelMosaicInfo,levelOutputTileInfo, xIndex, yIndex,level)
createPyramidTile(levelMosaicInfo, offsetX, offsetY, width, height,tilename,OGRDS)
if TileIndexName is not None:
shapeName=getTargetDir(level)+TileIndexName
copyTileIndexToDisk(OGRDS,shapeName)
if CsvFileName is not None:
csvName=getTargetDir(level)+CsvFileName
copyTileIndexToCSV(OGRDS,csvName)
return OGRDS
def getTileName(minfo,ti,xIndex,yIndex,level = -1):
"""
creates the tile file name
"""
global LastRowIndx
    # Use the larger of the two tile counts to size the zero padding; a local
    # name avoids shadowing the built-in max().
    maxTiles = ti.countTilesX
    if ti.countTilesY > maxTiles:
        maxTiles = ti.countTilesY
    countDigits = len(str(maxTiles))
parts=os.path.splitext(os.path.basename(minfo.filename))
if parts[0][0]=="@" : #remove possible leading "@"
parts = ( parts[0][1:len(parts[0])], parts[1])
if UseDirForEachRow :
format=getTargetDir(level)+str(yIndex)+os.sep+parts[0]+"_%0"+str(countDigits)+"i"+"_%0"+str(countDigits)+"i"
#See if there was a switch in the row, if so then create new dir for row.
if LastRowIndx < yIndex :
LastRowIndx = yIndex
if (os.path.exists(getTargetDir(level)+str(yIndex)) == False) :
os.mkdir(getTargetDir(level)+str(yIndex))
else:
format=getTargetDir(level)+parts[0]+"_%0"+str(countDigits)+"i"+"_%0"+str(countDigits)+"i"
#Check for the extension that should be used.
if Extension is None:
format=format+parts[1]
else:
format=format+"."+Extension
return format % (yIndex,xIndex)
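# Illustrative example (hypothetical values): for TargetDir 'tiles/', a source
# file 'map.tif' and a 12x8 tile grid (so countDigits == 2, no per-row dirs),
# getTileName(minfo, ti, 3, 7) returns 'tiles/map_07_03.tif' (note the
# (yIndex, xIndex) order in the final substitution).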
def UsageFormat():
print('Valid formats:')
count = gdal.GetDriverCount()
for index in range(count):
driver= gdal.GetDriver(index)
print(driver.ShortName)
# =============================================================================
def Usage():
print('Usage: gdal_retile.py ')
print(' [-v] [-co NAME=VALUE]* [-of out_format]')
print(' [-ps pixelWidth pixelHeight]')
print(' [-ot {Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/')
print(' CInt16/CInt32/CFloat32/CFloat64}]')
print(' [ -tileIndex tileIndexName [-tileIndexField fieldName]]')
print(' [ -csv fileName [-csvDelim delimiter]]')
print(' [-s_srs srs_def] [-pyramidOnly] -levels numberoflevels')
print(' [-r {near/bilinear/cubic/cubicspline/lanczos}]')
print(' [-useDirForEachRow]')
print(' -targetDir TileDirectory input_files')
# =============================================================================
# =============================================================================
#
# Program mainline.
#
def main(args = None):
global Verbose
global CreateOptions
global Names
global TileWidth
global TileHeight
global Format
global BandType
global Driver
global Extension
global MemDriver
global TileIndexFieldName
global TileIndexName
global CsvDelimiter
global CsvFileName
global TileIndexDriverTyp
global Source_SRS
global TargetDir
global ResamplingMethod
global Levels
global PyramidOnly
global UseDirForEachRow
gdal.AllRegister()
if args is None:
args = sys.argv
argv = gdal.GeneralCmdLineProcessor( args )
if argv is None:
return 1
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-of':
i+=1
Format = argv[i]
elif arg == '-ot':
i+=1
BandType = gdal.GetDataTypeByName( argv[i] )
if BandType == gdal.GDT_Unknown:
print('Unknown GDAL data type: %s' % argv[i])
return 1
elif arg == '-co':
i+=1
CreateOptions.append( argv[i] )
elif arg == '-v':
Verbose = True
elif arg == '-targetDir':
i+=1
TargetDir=argv[i]
if os.path.exists(TargetDir)==False:
print("TargetDir " + TargetDir + " does not exist")
return 1
if TargetDir[len(TargetDir)-1:] != os.sep:
TargetDir = TargetDir+os.sep
elif arg == '-ps':
i+=1
TileWidth=int(argv[i])
i+=1
TileHeight=int(argv[i])
elif arg == '-r':
i+=1
ResamplingMethodString=argv[i]
if ResamplingMethodString=="near":
ResamplingMethod=GRA_NearestNeighbour
elif ResamplingMethodString=="bilinear":
ResamplingMethod=GRA_Bilinear
elif ResamplingMethodString=="cubic":
ResamplingMethod=GRA_Cubic
elif ResamplingMethodString=="cubicspline":
ResamplingMethod=GRA_CubicSpline
elif ResamplingMethodString=="lanczos":
ResamplingMethod=GRA_Lanczos
else:
print("Unknown resampling method: %s" % ResamplingMethodString)
return 1
elif arg == '-levels':
i+=1
Levels=int(argv[i])
if Levels<1:
print("Invalid number of levels : %d" % Levels)
return 1
elif arg == '-s_srs':
i+=1
Source_SRS = osr.SpatialReference()
if Source_SRS.SetFromUserInput( argv[i] ) != 0:
print('invalid -s_srs: ' + argv[i]);
return 1;
elif arg == "-pyramidOnly":
PyramidOnly=True
elif arg == '-tileIndex':
i+=1
TileIndexName=argv[i]
parts=os.path.splitext(TileIndexName)
if len(parts[1])==0:
TileIndexName+=".shp"
elif arg == '-tileIndexField':
i+=1
TileIndexFieldName=argv[i]
elif arg == '-csv':
i+=1
CsvFileName=argv[i]
parts=os.path.splitext(CsvFileName)
if len(parts[1])==0:
CsvFileName+=".csv"
elif arg == '-csvDelim':
i+=1
CsvDelimiter=argv[i]
elif arg == '-useDirForEachRow':
UseDirForEachRow=True
elif arg[:1] == '-':
print('Unrecognised command option: %s' % arg)
Usage()
return 1
else:
Names.append( arg )
i+=1
if len(Names) == 0:
print('No input files selected.')
Usage()
return 1
if (TileWidth==0 or TileHeight==0):
print("Invalid tile dimension %d,%d" % (TileWidth,TileHeight))
return 1
if (TargetDir is None):
print("Missing Directory for Tiles -targetDir")
Usage()
return 1
# create level 0 directory if needed
if(UseDirForEachRow and PyramidOnly==False) :
leveldir=TargetDir+str(0)+os.sep
if (os.path.exists(leveldir)==False):
os.mkdir(leveldir)
if Levels > 0: #prepare Dirs for pyramid
startIndx=1
for levelIndx in range (startIndx,Levels+1):
leveldir=TargetDir+str(levelIndx)+os.sep
if (os.path.exists(leveldir)):
continue
os.mkdir(leveldir)
if (os.path.exists(leveldir)==False):
print("Cannot create level dir: %s" % leveldir)
return 1
if Verbose :
print("Created level dir: %s" % leveldir)
Driver = gdal.GetDriverByName(Format)
if Driver is None:
print('Format driver %s not found, pick a supported driver.' % Format)
UsageFormat()
return 1
DriverMD = Driver.GetMetadata()
Extension=DriverMD.get(DMD_EXTENSION);
if 'DCAP_CREATE' not in DriverMD:
MemDriver=gdal.GetDriverByName("MEM")
tileIndexDS=getTileIndexFromFiles(Names,TileIndexDriverTyp)
if tileIndexDS is None:
print("Error building tile index")
return 1;
minfo = mosaic_info(Names[0],tileIndexDS)
ti=tile_info(minfo.xsize,minfo.ysize, TileWidth, TileHeight)
if Source_SRS is None and len(minfo.projection) > 0 :
Source_SRS = osr.SpatialReference()
if Source_SRS.SetFromUserInput( minfo.projection ) != 0:
print('invalid projection ' + minfo.projection);
return 1
if Verbose:
minfo.report()
ti.report()
if PyramidOnly==False:
dsCreatedTileIndex = tileImage(minfo,ti)
tileIndexDS.Destroy()
else:
dsCreatedTileIndex=tileIndexDS
if Levels>0:
buildPyramid(minfo,dsCreatedTileIndex,TileWidth, TileHeight)
if Verbose:
print("FINISHED")
return 0
def initGlobals():
""" Only used for unit tests """
global Verbose
global CreateOptions
global Names
global TileWidth
global TileHeight
global Format
global BandType
global Driver
global Extension
global MemDriver
global TileIndexFieldName
global TileIndexName
global TileIndexDriverTyp
global CsvDelimiter
global CsvFileName
global Source_SRS
global TargetDir
global ResamplingMethod
global Levels
global PyramidOnly
global LastRowIndx
global UseDirForEachRow
Verbose=False
CreateOptions = []
Names=[]
TileWidth=256
TileHeight=256
Format='GTiff'
BandType = None
Driver=None
Extension=None
MemDriver=None
TileIndexFieldName='location'
TileIndexName=None
TileIndexDriverTyp="Memory"
CsvDelimiter=";"
CsvFileName=None
Source_SRS=None
TargetDir=None
ResamplingMethod=GRA_NearestNeighbour
Levels=0
PyramidOnly=False
LastRowIndx=-1
UseDirForEachRow=False
#global vars
Verbose=False
CreateOptions = []
Names=[]
TileWidth=256
TileHeight=256
Format='GTiff'
BandType = None
Driver=None
Extension=None
MemDriver=None
TileIndexFieldName='location'
TileIndexName=None
TileIndexDriverTyp="Memory"
CsvDelimiter=";"
CsvFileName=None
Source_SRS=None
TargetDir=None
ResamplingMethod=GRA_NearestNeighbour
Levels=0
PyramidOnly=False
LastRowIndx=-1
UseDirForEachRow=False
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | -6,382,919,717,801,504,000 | 29.835165 | 116 | 0.610834 | false |
cysuncn/python | spark/crm/PROC_A_ANALYSIS_INFO.py | 1 | 3852 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_ANALYSIS_INFO').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# derive the dates used throughout this job
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date string (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
OCRM_F_CI_CUST_DESC = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_DESC/*')
OCRM_F_CI_CUST_DESC.registerTempTable("OCRM_F_CI_CUST_DESC")
ACRM_F_CI_CUST_CONTRIBUTION = sqlContext.read.parquet(hdfs+'/ACRM_F_CI_CUST_CONTRIBUTION/*')
ACRM_F_CI_CUST_CONTRIBUTION.registerTempTable("ACRM_F_CI_CUST_CONTRIBUTION")
ACRM_F_CI_LOYALTY_INFO = sqlContext.read.parquet(hdfs+'/ACRM_F_CI_LOYALTY_INFO/*')
ACRM_F_CI_LOYALTY_INFO.registerTempTable("ACRM_F_CI_LOYALTY_INFO")
#任务[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,CAST(SUM(B.CUST_CONTRIBUTION) AS DECIMAL(24,6) ) AS CUST_CONTRIBUTION
,CAST(MAX(C.LOYA_SCORE) AS DECIMAL(22,2)) AS LOYA_SCORE
,CAST('' AS DECIMAL(22,2)) AS MONTH_TOTAL_INT
,CAST('' AS DECIMAL(22,2)) AS CUST_TOTAL_INT
,CAST('' AS DECIMAL(22,2)) AS MONTH_COST_INT
,CAST('' AS DECIMAL(22,2)) AS CUST_TOTAL_COST
,CAST('' AS DECIMAL(22,2)) AS CUST_USABLE_INT
,A.OBJ_RATING AS OBJ_RATING
,A.SUB_RATING AS SUB_RATING
,CAST('' AS DECIMAL(22,2)) AS ALERT_SCORE
,CAST('' AS DECIMAL(22,2)) AS LOST_SCORE
,CAST('' AS DECIMAL(22,2)) AS LIFE_CYCLE
,V_DT AS ODS_ST_DATE
,A.FR_ID AS FR_ID
   FROM OCRM_F_CI_CUST_DESC A  --unified customer information
LEFT JOIN ACRM_F_CI_CUST_CONTRIBUTION B --
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
AND B.ODS_DATE = V_DT
LEFT JOIN ACRM_F_CI_LOYALTY_INFO C --
ON A.CUST_ID = C.CUST_ID
AND A.FR_ID = C.FR_ID
AND C.ETL_DATE = V_DT
GROUP BY A.CUST_ID
,A.OBJ_RATING
,A.SUB_RATING
,A.FR_ID """
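# Note: the bare token V_DT inside the SQL above is a placeholder; the
# substitution below replaces each standalone V_DT with the quoted
# 10-character ETL date, e.g. "AND B.ODS_DATE = '2016-01-31'" (the date value
# shown here is illustrative).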
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_ANALYSIS_INFO = sqlContext.sql(sql)
ACRM_A_ANALYSIS_INFO.registerTempTable("ACRM_A_ANALYSIS_INFO")
dfn="ACRM_A_ANALYSIS_INFO/"+V_DT+".parquet"
ACRM_A_ANALYSIS_INFO.cache()
nrows = ACRM_A_ANALYSIS_INFO.count()
ACRM_A_ANALYSIS_INFO.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_ANALYSIS_INFO.unpersist()
OCRM_F_CI_CUST_DESC.unpersist()
ACRM_F_CI_CUST_CONTRIBUTION.unpersist()
ACRM_F_CI_LOYALTY_INFO.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_ANALYSIS_INFO/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_ANALYSIS_INFO lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | -7,797,813,515,775,515,000 | 42.953488 | 172 | 0.573545 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/celery/task/__init__.py | 6 | 1743 | # -*- coding: utf-8 -*-
"""
celery.task
~~~~~~~~~~~
This is the old task module, it should not be used anymore,
import from the main 'celery' module instead.
If you're looking for the decorator implementation then that's in
``celery.app.base.Celery.task``.
"""
from __future__ import absolute_import
from celery._state import current_app, current_task as current
from celery.five import LazyModule, recreate_module
from celery.local import Proxy
__all__ = [
'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task',
'group', 'chord', 'subtask', 'TaskSet',
]
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
from celery.canvas import group, chord, subtask
from .base import BaseTask, Task, PeriodicTask, task, periodic_task
from .sets import TaskSet
class module(LazyModule):
def __call__(self, *args, **kwargs):
return self.task(*args, **kwargs)
old_module, new_module = recreate_module( # pragma: no cover
__name__,
by_module={
'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask',
'task', 'periodic_task'],
'celery.canvas': ['group', 'chord', 'subtask'],
'celery.task.sets': ['TaskSet'],
},
base=module,
__package__='celery.task',
__file__=__file__,
__path__=__path__,
__doc__=__doc__,
current=current,
discard_all=Proxy(lambda: current_app.control.purge),
backend_cleanup=Proxy(
lambda: current_app.tasks['celery.backend_cleanup']
),
)
| agpl-3.0 | 6,246,125,139,671,379,000 | 28.542373 | 74 | 0.624785 | false |
KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/url_maps/add_host_rule.py | 3 | 4320 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding a host rule to a URL map."""
import copy
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.compute.url_maps import flags
class AddHostRule(base_classes.ReadWriteCommand):
"""Add a rule to a URL map to map hosts to a path matcher."""
URL_MAP_ARG = None
@classmethod
def Args(cls, parser):
cls.URL_MAP_ARG = flags.UrlMapArgument()
cls.URL_MAP_ARG.AddArgument(parser)
parser.add_argument(
'--description',
help='An optional, textual description for the host rule.')
hosts = parser.add_argument(
'--hosts',
type=arg_parsers.ArgList(min_length=1),
metavar='HOST',
required=True,
help='The set of hosts to match requests against.')
hosts.detailed_help = """\
The set of hosts to match requests against. Each host must be
a fully qualified domain name (FQDN) with the exception that
the host can begin with a ``*'' or ``*-''. ``*'' acts as a
glob and will match any string of atoms to the left where an
atom is separated by dots (``.'') or dashes (``-'').
"""
path_matcher = parser.add_argument(
'--path-matcher-name',
required=True,
        help=('The name of the path matcher to use if a request matches this '
              'host rule.'))
    path_matcher.detailed_help = """\
        The name of the path matcher to use if a request matches this
        host rule. The path matcher must already exist in the URL map
(see `gcloud compute url-maps add-path-matcher`).
"""
@property
def service(self):
return self.compute.urlMaps
@property
def resource_type(self):
return 'urlMaps'
def CreateReference(self, args):
return self.URL_MAP_ARG.ResolveAsResource(args, self.resources)
def GetGetRequest(self, args):
"""Returns the request for the existing URL map resource."""
return (self.service,
'Get',
self.messages.ComputeUrlMapsGetRequest(
urlMap=self.ref.Name(),
project=self.project))
def GetSetRequest(self, args, replacement, existing):
return (self.service,
'Update',
self.messages.ComputeUrlMapsUpdateRequest(
urlMap=self.ref.Name(),
urlMapResource=replacement,
project=self.project))
def Modify(self, args, existing):
"""Returns a modified URL map message."""
replacement = copy.deepcopy(existing)
new_host_rule = self.messages.HostRule(
description=args.description,
hosts=sorted(args.hosts),
pathMatcher=args.path_matcher_name)
replacement.hostRules.append(new_host_rule)
return replacement
AddHostRule.detailed_help = {
'brief': 'Add a rule to a URL map to map hosts to a path matcher',
'DESCRIPTION': """\
        *{command}* is used to add a mapping of hosts to a path
matcher in a URL map. The mapping will match the host
component of HTTP requests to path matchers which in turn map
the request to a backend service. Before adding a host rule,
at least one path matcher must exist in the URL map to take
care of the path component of the requests.
`gcloud compute url-maps add-path-matcher` or
`gcloud compute url-maps edit` can be used to add path matchers.
""",
'EXAMPLES': """\
To create a host rule mapping the ```*-foo.google.com``` and
```google.com``` hosts to the ```www``` path matcher, run:
$ {command} MY-URL-MAP --hosts '*-foo.google.com,google.com' --path-matcher-name www
""",
}
| apache-2.0 | -3,303,266,660,240,219,600 | 34.409836 | 94 | 0.654861 | false |
alfanugraha/LUMENS-repo | processing/GroupStats/help/source/conf.py | 2 | 7034 | # -*- coding: utf-8 -*-
#
# groupstats documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'groupstats'
copyright = u'2012, Rayo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.900'
# The full version, including alpha/beta/rc tags.
release = '1.900'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'groupstatsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'groupstats.tex', u'groupstats Documentation',
u'Rayo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'groupstats', u'groupstats Documentation',
[u'Rayo'], 1)
]
| gpl-2.0 | 8,135,983,765,758,161,000 | 31.564815 | 80 | 0.708985 | false |
maddox/home-assistant | tests/components/automation/test_event.py | 7 | 2561 | """
tests.components.automation.test_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the event automation component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.automation as automation
import homeassistant.components.automation.event as event
from homeassistant.const import CONF_PLATFORM
class TestAutomationEvent(unittest.TestCase):
""" Test the event automation. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_fails_setup_if_no_event_type(self):
self.assertFalse(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
automation.CONF_SERVICE: 'test.automation'
}
}))
def test_if_fires_on_event(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_event_with_data(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
event.CONF_EVENT_DATA: {'some_attr': 'some_value'},
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.bus.fire('test_event', {'some_attr': 'some_value'})
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_if_event_data_not_matches(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
event.CONF_EVENT_DATA: {'some_attr': 'some_value'},
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.bus.fire('test_event', {'some_attr': 'some_other_value'})
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
| mit | -6,792,689,384,429,014,000 | 31.833333 | 75 | 0.582585 | false |
didrocks/snapcraft | snapcraft/plugins/nil.py | 13 | 1196 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The nil plugin is useful for parts with no source.
Using this, parts can be defined purely by utilizing properties automatically
included by Snapcraft, e.g. stage-packages.
"""
import snapcraft
class NilPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
return {
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'object',
'additionalProperties': False,
'properties': {},
}
def enable_cross_compilation(self):
pass
| gpl-3.0 | -8,811,879,237,259,613,000 | 30.473684 | 77 | 0.695652 | false |
awacha/credolib | credolib/io.py | 1 | 4915 | __all__ = ['load_headers', 'getsascurve', 'getsasexposure', 'getheaders', 'getdists', 'filter_headers', 'load_exposure',
'load_mask']
from typing import List, Tuple, Union
import numpy as np
from IPython.core.getipython import get_ipython
from sastool.classes2.curve import Curve
from sastool.classes2.exposure import Exposure
from sastool.classes2.header import Header
from sastool.classes2.loader import Loader
def filter_headers(criterion):
"""Filter already loaded headers against some criterion.
The criterion function must accept a single argument, which is an instance
of sastool.classes2.header.Header, or one of its subclasses. The function
must return True if the header is to be kept or False if it needs to be
discarded. All manipulations on the header (including sample name changes,
etc.) carried out by this function are preserved.
"""
ip = get_ipython()
for headerkind in ['processed', 'raw']:
for h in ip.user_ns['_headers'][headerkind][:]:
if not criterion(h):
ip.user_ns['_headers'][headerkind].remove(h)
ip.user_ns['allsamplenames'] = {h.title for h in ip.user_ns['_headers']['processed']}
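# Example criterion (the title value is illustrative): keep only headers whose
# title is not 'Empty beam', discarding the rest from both raw and processed
# header lists.
#
#     filter_headers(lambda h: h.title != 'Empty beam')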
def load_headers(fsns:List[int]):
"""Load header files
"""
ip = get_ipython()
ip.user_ns['_headers'] = {}
for type_ in ['raw', 'processed']:
print("Loading %d headers (%s)" % (len(fsns), type_), flush=True)
processed = type_ == 'processed'
headers = []
for f in fsns:
for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed == processed]:
try:
headers.append(l.loadheader(f))
break
except FileNotFoundError:
continue
allsamplenames = {h.title for h in headers}
if not headers:
print('NO HEADERS READ FOR TYPE "%s"' % type_)
else:
print("%d headers (%s) out of %d have been loaded successfully." % (len(headers), type_, len(fsns)))
print('Read FSN range:', min([h.fsn for h in headers]), 'to', max([h.fsn for h in headers]))
print("Samples covered by these headers:")
print(" " + "\n ".join(sorted(allsamplenames)), flush=True)
if processed:
ip.user_ns['allsamplenames'] = allsamplenames
ip.user_ns['_headers'][type_] = headers
def getsascurve(samplename:str, dist=None) -> Tuple[Curve, Union[float, str]]:
ip = get_ipython()
if dist == 'united':
data1d = ip.user_ns['_data1dunited'][samplename]
elif dist is None:
try:
data1d = ip.user_ns['_data1dunited'][samplename]
dist = 'united'
except KeyError:
data1d = ip.user_ns['_data1d'][samplename]
dist = sorted(data1d.keys())[0]
data1d = data1d[dist]
else:
data1d = ip.user_ns['_data1d'][samplename]
dist = sorted(list(data1d.keys()), key=lambda k:abs(float(dist) - k))[0]
data1d = data1d[dist]
return data1d, dist
def getsasexposure(samplename, dist=None) -> Tuple[Exposure, float]:
ip = get_ipython()
if dist is None:
data2d = ip.user_ns['_data2d'][samplename]
dist = sorted(data2d.keys())[0]
data2d = data2d[dist]
else:
data2d = ip.user_ns['_data2d'][samplename]
dist = sorted(list(data2d.keys()), key=lambda k:abs(float(dist) - k))[0]
data2d = data2d[dist]
return data2d, dist
def getheaders(processed=True) -> List[Header]:
ip = get_ipython()
if processed:
return ip.user_ns['_headers']['processed']
else:
return ip.user_ns['_headers']['raw']
def getdists(samplename) -> List[float]:
ip = get_ipython()
return sorted([d for d in ip.user_ns['_headers_sample'][samplename]])
def get_different_distances(headers, tolerance=2) -> List[float]:
alldists = {float(h.distance) for h in headers}
dists = []
for d in alldists:
if [d_ for d_ in dists if abs(d - d_) < tolerance]:
continue
dists.append(d)
return sorted(dists)
def load_exposure(fsn:int, raw=True, processed=True) -> Exposure:
ip = get_ipython()
for l in ip.user_ns['_loaders']:
assert isinstance(l, Loader)
if l.processed and not processed:
continue
if not l.processed and not raw:
continue
try:
return l.loadexposure(fsn)
except (OSError, ValueError):
continue
raise FileNotFoundError('Cannot find exposure for fsn #{:d}'.format(fsn))
def load_mask(maskname: str) -> np.ndarray:
ip = get_ipython()
for l in ip.user_ns['_loaders']:
assert isinstance(l, Loader)
try:
return l.loadmask(maskname)
except OSError:
continue
raise FileNotFoundError('Cannot load mask file {}'.format(maskname))
| bsd-3-clause | -4,738,564,507,141,617,000 | 36.519084 | 120 | 0.603052 | false |
juliandev/SIFTER | bin/scripts/sifter_prepare.py | 2 | 35833 | #!/usr/bin/python
import _mysql as mysql
import _mysql_exceptions as mysql_exceptions
import MySQLdb.cursors
import os
import pickle
import numpy as np
import sys
import StringIO
from scipy.misc import comb
import getopt
import csv
import re
import subprocess
import random
import glob
def usage():
print "\n-----------------------------------------------------------------"
print "Usage: "
print " sifter_prepare.py [options] <families_data_folder> <output_folder>"
print "-----------------------------------------------------------------\n"
print "Examples:"
print " sifter_prepare.py -p C0JYY2_HUMAN ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -s 9823 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -f PF03818 -a ../example/fam_data ../example/queries\n"
print " sifter_prepare.py --ip ../example/protein_list.txt -r ../example/fam_data ../example/queries\n"
print " sifter_prepare.py --if ../example/family_list.txt ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -p C0JYY2_HUMAN -x 1e5 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -p C0JYY2_HUMAN -t 2 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -s 9823 --dbaddr www.example.org --dbuser jack --dbpass 1234 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -A --hit_file ../example/pfam_res.txt ../example/fam_data ../example/queries\n"
print "This function prepares necessary files for your query to run SIFTER on."
print "@author Sayed Mohammad Ebrahim Sahraeian ([email protected])"
print "Please cite new paper:"
print "-Sahraeian SME, Luo KR, Brenner SE (2015)"
print "\nThe SIFTER algorithm presented in the following paper:"
print "- Engelhardt BE, Jordan MI, Srouji JR, Brenner SE. 2011. Genome-scale phylogenetic function annotation of large and diverse protein families. Genome Research 21:1969-1980. \n"
print "inputs:"
print " <families_data_folder> Path to the folder where the"
print " families data are placed. You can"
print " download the precomputed data"
print " or build it using the"
print " 'sifter_gather_family_data.py' script."
print " <output_folder> Path to the output folder where"
print " the necessary query files and"
print " results will be written to."
print "options: (you should only use one of '-p -s -f --ip -A' options.)"
print " -p STRING List of query proteins (use Uniprot ID"
print " or Accession) in comma seperated format."
print " -s STRING NCBI taxonomy ID for input species."
print " -f STRING List of Pfam families for which you"
print " want to prepare data."
print " (in comma seperated format)"
print " --ip STRING Path to the input file where the list"
print " of proteins are placed."
print " --if STRING Path to the input file where the list"
print " of families are placed."
print " --hit_file STRING Output of pfam_scan.pl file on the "
print " novel genome. This file consists of"
print " the list of pfam hits for the genome."
print " If this option is uded, we will"
print " look in this file to find Pfams"
print " instead of the SQL database."
print " -A Prepare for all Pfam families of queried"
print " novel genome. (hit_file should be provided)"
print " -a Include all experimental and"
print " non-experimental evidence"
print " in the inference. (Defualt [if"
print " this option is not used]: only"
print " experimental evidence will be used)."
print " -r Remove all query files already prepared"
print " and rebuild the queries."
print " -x INT Maximum number of nonzero elements"
print " in the transition matrix. Should be"
print " a number in [1e5,1e7] for reasonable"
print " time and accuracy balance (Default=2250000)"
print " Smaller value leads to faster running time."
print " -t INT Number of functions to truncate"
print " to in approximation [Default:"
print " adaptive based on -x option]"
print " Smaller value leads to faster running time."
print " --dbaddr STRING Address of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: localhost]"
print " --dbname STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: sifter_db]"
print " --dbuser STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: root]"
print " --dbpass STRING Password of the user for the MySQL"
print " database that has neccessary data"
print " for SIFTER [Default: '']"
print " -h Help. Print Usage."
def msql(query, db):
c = db.cursor()
c.execute(query)
results = c.fetchall()
c.close()
return results
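# Typical usage (pfamA is one of the Pfam tables queried below; the DictCursor
# configured in __main__ makes each returned row a dict):
#     rows = msql("SELECT pfamA_acc FROM pfamA LIMIT 5", db_mysql)
#     accs = [r['pfamA_acc'] for r in rows]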
def find_pfam_for_genes(genes):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamseq.pfamseq_id,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamseq.pfamseq_acc in ('%s')
OR pfamseq.pfamseq_id in ('%s')
"""%("','".join(genes),"','".join(genes))
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_pfam_for_taxid(taxid):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamseq.pfamseq_id,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamseq.ncbi_taxid = '%s'
"""%(taxid)
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_genes_for_pfams(pfam_ids):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamA.pfamA_acc in ('%s')
"""%("','".join(pfam_ids))
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_pfam_2_gene(res):
pfam_2_gene={}
gene_2_pfam={}
for w in res:
my_pfam=w['pfamA_acc']
my_gene=w['pfamseq_acc']
if my_pfam not in pfam_2_gene:
pfam_2_gene[my_pfam]=set([])
pfam_2_gene[my_pfam].add(my_gene)
if my_gene not in gene_2_pfam:
gene_2_pfam[my_gene]=set([])
gene_2_pfam[my_gene].add(my_pfam)
return pfam_2_gene,gene_2_pfam
def find_pfam_2_gene_from_file(hit_file):
pfam_2_gene={}
gene_2_pfam={}
with open(hit_file, 'rb') as infile:
for line in infile:
line=line.strip()
if not line:
continue
if len(line)<3:
continue
if line[0]=="#" and not line[2]=="<":
continue
if line[0]=="#" and line[2]=="<":
keys=line.split('> <')
keys[0]=keys[0].split('<')[1]
keys[-1]=keys[-1].split('>')[0]
continue
row=line.split()
            if not len(row)==15:
                print "ERR: expected 15 columns in pfam_scan hit line, got %d"%len(row)
                break
r={k:row[i] for i,k in enumerate(keys)}
if r['significance']=='1':
pfam_id=r['hmm acc'][0:r['hmm acc'].find('.')]
my_gene=r['seq id']
if pfam_id not in pfam_2_gene.keys():
pfam_2_gene[pfam_id]=set([])
pfam_2_gene[pfam_id].add(my_gene)
if my_gene not in gene_2_pfam.keys():
gene_2_pfam[my_gene]=set([])
gene_2_pfam[my_gene].add(pfam_id)
print "Your queried novel genome has:"
print len(gene_2_pfam), "genes in pfam"
print len(pfam_2_gene), "pfam families\n"
return pfam_2_gene,gene_2_pfam
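# Note: the hit file is the whitespace-delimited output of pfam_scan.pl; the
# commented header line beginning "# <seq id> ..." supplies the 15 column
# names parsed above, and only rows whose significance field is '1' are kept.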
# See how many sequences are in each family, add it to pfds
def get_pfds(pfams):
pfds={}
for p in pfams:
sql_q="""select pfamA.num_full, pfamA.number_species, pfamA.pfamA_id, pfamA.description, group_concat(go_id) as go_ids, group_concat(term) as go_terms from pfamA left join gene_ontology on gene_ontology.auto_pfamA = pfamA.auto_pfamA where pfamA.pfamA_acc='%s' group by pfamA_acc
"""%(p)
#AND Locus.type=1
#AND Synonym.type=2
r = msql(sql_q, db_mysql)
if r:
pfds[p]={}
for w in r[0].keys():
pfds[p][w]=r[0][w]
return pfds
# ##Process Evidence
def parse_GO_OBO(obo_file):
go_dict={}
new_is_comming=-1
with open(obo_file, "r") as infile:
currentGOTerm = None
for line in infile:
line = line.strip()
if not line: continue #Skip empty
if new_is_comming==1:
key, sep, val = line.partition(":")
key=key.strip()
val=val.strip()
currentGOTerm=val
go_dict[currentGOTerm]={}
new_is_comming=0
continue
if line == "[Term]":
new_is_comming=1
elif line == "[Typedef]":
#Skip [Typedef sections]
new_is_comming=-1
elif new_is_comming==0:
#Only process if we're inside a [Term] environment
key, sep, val = line.partition(":")
key=key.strip()
val=val.strip()
if key not in go_dict[currentGOTerm]:
go_dict[currentGOTerm][key]=[]
go_dict[currentGOTerm][key].append(val.strip())
#Add last term
#remove obsoletes
obseletes=[]
for term in go_dict:
if 'is_obsolete' in go_dict[term]:
if go_dict[term]['is_obsolete'][0]== 'true':
obseletes.append(term)
continue
for term in obseletes:
del go_dict[term]
ontologies=['biological_process','molecular_function','cellular_component']
DAGs={w:{} for w in ontologies}
DAGs_r={w:{} for w in ontologies}
roots={w:{} for w in ontologies}
for term in go_dict.keys():
ont=go_dict[term]['namespace'][0]
DAGs[ont][term]=[]
DAGs_r[ont][term]=[]
for term in go_dict.keys():
ont=go_dict[term]['namespace'][0]
if 'is_a' in go_dict[term]:
for pa in go_dict[term]['is_a']:
term_2=pa.split(' ! ')[0]
DAGs[ont][term].append(term_2)
DAGs_r[ont][term_2].append(term)
else:
roots[ont]=term
return go_dict,DAGs,DAGs_r,roots
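# Sketch of typical use (obo_file is defined near the bottom of this script;
# some_term stands for any GO accession in that ontology):
#     go_dict, DAGs, DAGs_r, roots = parse_GO_OBO(obo_file)
#     roots['molecular_function']            # e.g. 'GO:0003674'
#     DAGs['molecular_function'][some_term]  # is_a parents of some_term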
def trace_to_ontology_root(cur_node):
"""
Generator to recursively visit all nodes on each path
from a node up to the root node.
"""
#print "Graph node:", cur_node
yield cur_node
for pa in DAGs[ont][cur_node]:
for n in trace_to_ontology_root(pa):
yield n
def get_ontology_subdag(annotated_term_nodes):
"""
Given evidence_set, returns a filtered subgraph of evidence_ontology
that only contains those nodes or their ancestors.
"""
# For each annotated node, traverse to the root node of the ontology
# to include all its less-specific terms
all_term_nodes = set([])
for go_term in annotated_term_nodes:
traced=trace_to_ontology_root(go_term)
all_term_nodes.update(set(traced))
sub_dag = all_term_nodes
return sub_dag
def get_leaves_from_node(sub_dag, top_node):
descendant_leaves = set()
#print "Top node is: %s"%str(top_node)
#print "Successors: %s"%str(godag.successors(top_node))
for ch in set(DAGs_r[ont][top_node])&set(sub_dag):
if not set(DAGs_r[ont][ch])&set(sub_dag):
descendant_leaves.add(ch)
else:
descendant_leaves.update(get_leaves_from_node(sub_dag,ch))
return descendant_leaves
def find_candidate_fcns(unique_terms):
    '''
Using the parsed evidence, this places the evidence set
and modifies the gene ontology graph in the SIFTER 2.0 way.
'''
# For each protein in the evidence set, store the annotation
# into the evidence graph
annotated_term_nodes = []
for go_term in unique_terms:
if go_term not in DAGs[ont]:
print "GO term, %s doesn't seem to be named in your ontology."%go_term
continue
annotated_term_nodes.append(go_term)
go_subdag = get_ontology_subdag(annotated_term_nodes=annotated_term_nodes)
root_node = roots[ont]
candidate_fcns=get_leaves_from_node(go_subdag, root_node)
return candidate_fcns
def max_fun_possible(i,thr):
max_f=0
for j in range(1,i+1):
max_f_temp=max_f+comb(i,j,exact=0)
if max_f_temp>thr:
return [j-1,max_f]
else:
max_f=max_f_temp
return [i,max_f]
def calc_numel(numTerms, maxFun):
return pow(sum([float(comb(numTerms, i, exact=0)) for i in range(1, maxFun + 1)]), 2)
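# Worked example: with numTerms=10 candidate functions truncated to maxFun=2,
# the transition matrix holds (C(10,1) + C(10,2))**2 = 55**2 = 3025 elements,
# i.e. calc_numel(10, 2) == 3025.0.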
def get_criteria(numTerms, maxFun):
if numTerms > 8:
if maxFun > 1:
return 1
else:
return 2
else:
return 3
def get_category(numel, famSize):
# List of dividers based upon the number of elements NUMEL in transition matrix
numelDivs = [65025.0, 330625.0, 1046529.0]
# List of dividers based upon the family size FAMSIZE
famSizeDivs = [567.0, 1637.0, 4989.0]
n = sum(map(lambda x: numel > x, numelDivs))
s = sum(map(lambda x: famSize > x, famSizeDivs))
return (n, s)
def est_processing_time(numTerms, famSize, maxFun,numel):
paramsDict = {1: [-6.6940979152046394, 1.2175437752942884, 0.61437156459022535],
2: [-3.6107074614976109, 0.91343454244972999, 0.45521131812635984],
3: [-2.7026843343076519, 0.052132418536663394, 0.93755721899494526]}
crit = get_criteria(numTerms, maxFun)
line = paramsDict[crit]
return pow(10, line[0]) * pow(numel, line[1]) * pow(famSize, line[2])
def get_upper_bound(eTime, cat, per):
percentileDict={(0, 0): {'95': 8.3435056315411593, '99.9': 10.953643510480756},
(0, 1): {'95': 9.4040189875556379, '99.9': 10.175590194144538},
(0, 2): {'95': 7.0857310513064657, '99.9': 10.031292126553355},
(0, 3): {'95': 4.3471755740354761, '99.9': 8.7766092407283836},
(1, 0): {'95': 4.0445760101251587, '99.9': 9.5270816900332136},
(1, 1): {'95': 2.3310236959309329, '99.9': 3.4547033474036422},
(1, 2): {'95': 1.8195072570575042, '99.9': 2.9109043732685018},
(1, 3): {'95': 2.0892177205927638, '99.9': 7.8978069638688924},
(2, 0): {'95': 2.2542718513558571, '99.9': 2.9746194223225029},
(2, 1): {'95': 2.6775509810516125, '99.9': 4.4976310858312294},
(2, 2): {'95': 2.9809620961392786, '99.9': 4.8087748272548554},
(2, 3): {'95': 4.4914777165287258, '99.9': 6.7709753345612205},
(3, 0): {'95': 2.6439743599924892, '99.9': 3.3485478896514702},
(3, 1): {'95': 2.883955861280195, '99.9': 3.9323761482164077},
(3, 2): {'95': 3.156846158873563, '99.9': 3.904755873693849},
(3, 3): {'95': 3.898056279279821, '99.9': 4.4261063907623219}}
percentiles = percentileDict[(cat[0],cat[1])][per]
return eTime*percentiles
def format_times(times):
if not times:
return times
t = times[0]
if t < 1:
return ['%.1f seconds' % (60 * t) for t in times]
elif t < 60:
return ['%.1f minutes' % t for t in times]
elif t < 60 * 24:
return ['%.1f hours' % (t / 60) for t in times]
elif t < 60 * 24 * 365:
return ['%.1f days' % (t / 60 / 24) for t in times]
else:
return ['%.1f years' % (t / 60 / 24 / 365) for t in times]
def estimate_time(numTerms, famSize,t_lev):
tableBody = []
pers = ['95','99.9']
maxFun=min(t_lev,numTerms)
numel = calc_numel(numTerms, maxFun)
eTime = est_processing_time(numTerms, famSize, maxFun,numel)
eTime = max(eTime, 1.0) # set minimum estimated time to 1 minute
cat = get_category(numel, famSize)
row = [maxFun]
times = [eTime]
for j in range(len(pers)):
upper = get_upper_bound(eTime, cat, pers[j])
times.append(upper)
row.extend(times)
row.extend(format_times(times))
return row
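# Illustrative call (hypothetical numbers): estimate_time(120, 800, 3) returns
# [maxFun, estimate, 95% bound, 99.9% bound] followed by the same three times
# formatted as strings; maxFun = min(3, 120) and the raw times are in minutes.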
def store_run_data(pfam_id):
data={}
data['pfam_id']=pfam_id
data['query_proteins']=[]#pplacer_queries[pfam_id]
data['query_protein_accs']={}#{k['id']:pfamseq_acc_for_id[k['id']] for k in pplacer_queries[pfam_id]}
data['tree_size']=tree_sizes[pfam_id]
data['evidence_constraints']=evidence_allowed
data['tree_loc']=reconciled_folder+'/%s'%pfam_id+"_reconciled.xml"
data['tree_format']='phyloxml',
data['annotation_loc']=evidence_folder+'/%s.pli'%pfam_id
data['annotation_loc_pickle']=evidence_folder+'/%s.pickle'%pfam_id
data['annotation_format']='pli'
print "Loading goa annotations for %s..."%pfam_id
evidence_pickle_file = evidence_folder+'/%s.pickle'%pfam_id # file with annotations
rand_id_1=random.randint(1000000,9999999)
if os.path.exists(evidence_pickle_file+'.gz'):
        if os.path.exists('%s.%d'%(evidence_pickle_file,rand_id_1)):
            # Clear any stale temporary copy before regenerating it below.
            subprocess.check_call("rm %s.%d"%(evidence_pickle_file,rand_id_1),shell=True)
subprocess.check_call("gunzip -c %s.gz > %s.%d"%(evidence_pickle_file,evidence_pickle_file,rand_id_1),shell=True)
[evidence_file2,pfam_anns, pp, seq_lookup] = pickle.load(open('%s.%d'%(evidence_pickle_file,rand_id_1), 'rb'))
if os.path.exists('%s.%d'%(evidence_pickle_file,rand_id_1)):
subprocess.check_call("rm %s.%d"%(evidence_pickle_file,rand_id_1),shell=True)
# Filter for only experimental annotations.
unique_terms=set([])
num_ev = 0
for prot_id, anns in pfam_anns.iteritems():
alwd_ev = [a['acc'] for a in anns if a['code'] in evidence_allowed]
unique_terms=unique_terms.union(set(alwd_ev))
# a is an annotation with a function and a code that says where the function came from
# keep this annotation if it was gotten through experiments
if len(alwd_ev) > 0:
num_ev += 1
    print pfam_id, 'has', num_ev, "annotated proteins with allowed evidence type"
data['num_ev_prots']=num_ev
data['num_any_ev_prots']=len(pfam_anns)
if len(unique_terms)==1 and ('GO:0003674' in unique_terms):
num_ev=0
if num_ev>0:
# Input evidence
candidate_fcns=find_candidate_fcns(unique_terms)
evidence_format = 'pli'
data['n_terms'] = len(candidate_fcns)
data['candids'] = candidate_fcns
thr=max_fun_possible(data['n_terms'],np.sqrt((mx_numel)))[0]
if truncation_level:
thr=min(thr,truncation_level)
row=estimate_time(data['n_terms'],data['tree_size'],thr)
data['e_time']=row
print "Number of functions:",data['n_terms']
print "We will use truncation level = %s"%row[0]
print "Estimated running time for family %s = %s (95%% confidence upper bound = %s)"%(pfam_id,row[4],row[5])
pickle.dump(data, open(queries_folder+'/%s_query.pickle'%pfam_id, 'wb'))
print "Processed evidence from:", pfam_id
else:
print "No candidate functions: SIFTER will not be run on this family."
data['n_terms'] = 0
pickle.dump(data, open(queries_folder+'/NQ/%s_query.pickle'%pfam_id, 'wb'))
def prepare_for_each_family(pfam_id):
reconciled_fname = reconciled_folder+'/%s'%pfam_id
evidence_file = evidence_folder+'/%s.pli'%pfam_id
evidence_pickle_file = evidence_folder+'/%s.pickle'%pfam_id
queries_to_process=[]
skip_flag=0
if not(os.path.isfile(reconciled_fname+"_reconciled.xml.gz")):
print "\nERROR: No tree file %s. Skip this family.\n"%(reconciled_fname+"_reconciled.xml.gz")
skip_flag=1
if not(os.path.isfile(evidence_file+'.gz')):
print "\nERROR: No evidence file %s.gz. Skip this family.\n"%(evidence_file)
skip_flag=1
if not(os.path.isfile(evidence_pickle_file+'.gz')):
print "\nERROR: No evidence file %s.gz. Skip this family.\n"%(evidence_pickle_file)
skip_flag=1
q_flag=0
if (skip_flag==0):
if not(os.path.isfile(queries_folder+'/%s_query.pickle'%pfam_id)) and not(os.path.isfile(queries_folder+'/NQ/%s_query.pickle'%pfam_id)):
store_run_data(pfam_id)
else:
print "Family %s already prepared."%(pfam_id)
if (os.path.isfile(queries_folder+'/%s_query.pickle'%pfam_id)):
q_flag=1
return q_flag
if __name__=="__main__":
# Initialization
params_mysql = {\
'db_address': 'localhost',
'db_username': 'root',
'db_password': '',
'db_name': 'sifter_db'
}
evidence_constraints_exp = [
# Experimental
'EXP', # Experiment
'IDA', # Direct Assay
'IPI', # Physical Interaction
'IMP', # Mutant Phenotype
'IGI', # Genetic Interaction
'IEP', # Expression Pattern
# Author Statements
'TAS', # Traceable Author Statement
'NAS', # Non-traceable Author Statement
]
evidence_constraints_all = [
# Experimental
'EXP', # Experiment
'IDA', # Direct Assay
'IPI', # Physical Interaction
'IMP', # Mutant Phenotype
'IGI', # Genetic Interaction
'IEP', # Expression Pattern
# Author Statements
'TAS', # Traceable Author Statement
'NAS', # Non-traceable Author Statement
# Computational Analysis Evidence Codes
'ISS', # Sequence/Structural Similarity
'ISO', # Sequence Orthology
'ISA', # Sequence Alignment
'ISM', # Sequence Model
'IGC', # Genomic Context
'IBA', # Biological aspect of ancestor
'IBD', # Biological aspect of descendant
'IKR', # Key Residues
'IRD', # Rapid Divergence
'RCA', # Reviews Computational Analysis
# Curator Statement
'IC', # Curator
'ND', # No biological data available
# Automatically assigned
'IEA', # Electronic Annotation
# Obsolete
'NR' # Not recorded
]
main_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
obo_file=main_dir+'/data/go.obo'
evidence_allowed = evidence_constraints_exp
taxid=''
query_families=[]
query_proteins=[]
p_input_file=''
f_input_file=''
truncation_level=0
pfams=[]
remove_query_files=0
mx_numel=2250000
hit_file=''
all_fams=0
# Check for options
opts, args = getopt.getopt(sys.argv[1:], "hraAp:s:f:t:x:",['ip=','if=','dbname=','dbpass=','dbuser=','dbaddr=','hit_file='])
if len(args) != 2:
usage()
sys.exit()
choices=[]
if len(opts)>0:
for o, a in opts:
if o == "-p":
                parts = a.strip().split(',')
                query_proteins = list(set([w for w in parts if w]))
choices.append('p')
elif o == "-s":
taxid = a
choices.append('s')
elif o == "-f":
                parts = a.strip().split(',')
                query_families = list(set([w for w in parts if w]))
choices.append('f')
elif o == "--ip":
p_input_file = a
choices.append('ip')
elif o == "--if":
f_input_file = a
choices.append('if')
elif o == "-A":
all_fams = 1
choices.append('A')
elif o == "--hit_file":
hit_file = a
elif o == "-a":
evidence_allowed = evidence_constraints_all
elif o == "-r":
remove_query_files=1
elif o == "-x":
mx_numel=int(float(a))
elif o == "-t":
truncation_level=int(a)
elif o == "--dbname":
params_mysql['db_name']= a
elif o == "--dbaddr":
params_mysql['db_address']= a
elif o == "--dbpass":
params_mysql['db_password']= a
elif o == "--dbuser":
params_mysql['db_username']= a
else:
usage()
sys.exit()
if len(choices)==0:
        print "\nERROR: No queries were entered."
print "Please use one of the '-p -s -f --ip --if -A' options to enter your query.\n"
sys.exit()
elif len(choices)>1:
print "\nERROR: Please use ONLY one of the '-p -s -f --ip --if -A' options to enter your query.\n"
sys.exit()
families_data_path=args[0]
if not os.path.exists(families_data_path):
print "\nERROR: families_data directory ( %s ) does not exist\n"%families_data_path
sys.exit()
evidence_folder=families_data_path+'/annotations'
if not os.path.exists(evidence_folder):
        print "\nERROR: annotations directory ( %s ) does not exist\n"%evidence_folder
sys.exit()
reconciled_folder=families_data_path+'/reconciled_trees'
if not os.path.exists(reconciled_folder):
        print "\nERROR: reconciled_trees directory ( %s ) does not exist\n"%reconciled_folder
sys.exit()
alignment_folder=families_data_path+'/alignments'
if not os.path.exists(alignment_folder):
        print "\nERROR: alignment directory ( %s ) does not exist\n"%alignment_folder
sys.exit()
###
output_path=args[1]
queries_folder=output_path
if remove_query_files==1:
os.system('rm -rf %s'%queries_folder)
if not os.path.exists(output_path):
os.mkdir(output_path)
db_mysql = MySQLdb.connect(host=params_mysql['db_address'],
user=params_mysql['db_username'],
passwd=params_mysql['db_password'],
db=params_mysql['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
queries_folder_NQ=output_path+'/NQ'
if not os.path.exists(queries_folder_NQ):
os.mkdir(queries_folder_NQ)
prepared_queries=glob.glob(output_path+'/*.pickle')+glob.glob(queries_folder_NQ+'/*.pickle')
prepared_queries=[(w.split('/')[-1]).split('_query')[0] for w in prepared_queries]
already_prepared_fams=[]
print "\n\n--------------Reading the query information------------"
if hit_file:
if not os.path.exists(hit_file):
print "\nERROR: No Pfam hit file at %s.\n"%hit_file
sys.exit()
else:
pfam_2_gene_hit,gene_2_pfam_hit=find_pfam_2_gene_from_file(hit_file)
if query_families or f_input_file:
if f_input_file:
if not os.path.exists(f_input_file):
print "\nERROR: No file exists at %s\n"%f_input_file
sys.exit()
f = open(f_input_file, 'r')
a=f.read()
            parts = re.split(' |,|;|\n', a.strip())
            query_families = list(set([w for w in parts if w]))
already_prepared_fams=list(set(query_families)&set(prepared_queries))
toprep_families=list(set(query_families)-set(prepared_queries))
        print "%s out of %s families have already been prepared. We will check the %s others."%(len(already_prepared_fams),len(query_families),len(toprep_families))
query_families=toprep_families
res=find_genes_for_pfams(query_families)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
for f in set(query_families)-set(pfams):
print "Family %s is not in the SQL database."%f
if query_families:
print "Run SIFTER for Pfam families: %s"%','.join(query_families)
elif query_proteins or p_input_file:
if p_input_file:
if not os.path.exists(p_input_file):
print "\nERROR: No file exists at %s\n"%p_input_file
sys.exit()
f = open(p_input_file, 'r')
a=f.read()
            parts = re.split(' |,|;|\n', a.strip())
            query_proteins = list(set([w for w in parts if w]))
if not hit_file:
res=find_pfam_for_genes(query_proteins)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
else:
gene_2_pfam={p:gene_2_pfam_hit[p] for p in query_proteins if p in gene_2_pfam_hit}
pfam_2_gene={}
for g,fs in gene_2_pfam.iteritems():
for f in fs:
if not f in pfam_2_gene:
pfam_2_gene[f]=set([])
pfam_2_gene[f].add(g)
pfams=pfam_2_gene.keys()
print "Run SIFTER for %s Pfam families for %s query proteins"%(len(pfams),len(query_proteins))
elif taxid:
if not hit_file:
res=find_pfam_for_taxid(taxid)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
print "Run SIFTER for %s Pfam families for query species (taxid=%s) with %s proteins"%(len(pfams),taxid,len(gene_2_pfam))
else:
gene_2_pfam=gene_2_pfam_hit;
pfam_2_gene=pfam_2_gene_hit;
pfams=pfam_2_gene.keys()
print "-s will be ignored. We will run on all %s Pfam families in the hit-file"%(len(pfams))
elif all_fams==1:
if not hit_file:
            print "\nERROR: The -A option can only be used for novel genomes (a hit_file must be provided)\n"
sys.exit()
else:
gene_2_pfam=gene_2_pfam_hit;
pfam_2_gene=pfam_2_gene_hit;
pfams=pfam_2_gene.keys()
print "We will run on all %s Pfam families in the hit-file"%(len(pfams))
if (not pfams):
if (not already_prepared_fams):
            print "\nERROR: There are no Pfam families for your input query."
            print "Please use one of the '-p -s -f --ip -A --hit_file' options to enter your query.\n"
sys.exit()
else:
            print "-------------------Preparation is done----------------------"
            print "All of your %s query families have already been prepared."%(len(already_prepared_fams))
print "\nNext step is to run 'sifter_run.py'."
print "You may exclude some of the more complex families there.\n"
else:
pfds=get_pfds(pfams)
tree_sizes = {}
for p in pfds.keys():
tree_sizes[p] = pfds[p]['num_full']
sorted_fams = sorted(pfds.keys(), key=lambda k:pfds[k]['num_full'])
        print "Number of families:", len(sorted_fams)
print "\n-----------------Reading the ontology file----------------"
ont='molecular_function'
go_dict,DAGs,DAGs_r,roots=parse_GO_OBO(obo_file)
print "\n------------Prepare the necessary query files-------------"
pfams_to_process = []
for i,pfam_id in enumerate(sorted_fams):
q_flag=prepare_for_each_family(pfam_id)
if q_flag==1:
pfams_to_process.append(pfam_id)
print "Input file prepared for %s (%d out of %d families)"%(pfam_id,i+1,len(sorted_fams))
nqs=0
for pfam_id in sorted_fams:
nqf=queries_folder_NQ+'/%s_query.pickle'%pfam_id
if (os.path.isfile(nqf)):
nqs+=1
errors=len(sorted_fams)-len(pfams_to_process)-nqs
if len(pfams_to_process)>0:
e_times = []
total_e=0
total_95=0
with open(output_path+'/running_estimation.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
                spamwriter.writerow(['Family','Number of candidate functions','Family size','Truncation level','Estimated running time','95% confidence upper bound','99.9% confidence upper bound'])
for pfam_id in pfams_to_process:
qfile=queries_folder+'/%s_query.pickle'%pfam_id
query_data = pickle.load(open(qfile, "rb" ))
row=query_data['e_time']
spamwriter.writerow([pfam_id,query_data['n_terms'],query_data['tree_size'],row[0],row[4],row[5],row[6]])
total_e +=(row[1])
total_95 +=(row[2])
fe=format_times([total_e,total_95])
if already_prepared_fams:
                print "%s of your query families have already been prepared."%(len(already_prepared_fams))
                print "Here are the statistics for the rest of the queries."
            print "\nFiles are prepared for %d out of %d families. (%s missed due to errors, %s skipped due to no candidate functions)"%(len(pfams_to_process),len(sorted_fams),errors,nqs)
            print "-------------------Preparation is done----------------------"
print "There are %s families to run SIFTER on."%(len(pfams_to_process))
print "\nTotal estimated time for your query is %s (95%% confidence upper bound = %s)."%(fe[0],fe[1])
print "Details for individual families are written in '%s/running_estimation.csv'"%output_path
print "\nNext step is to run 'sifter_run.py'."
print "You may exclude some of the more complex families there.\n"
else:
            print "\nFiles are prepared for %d out of %d families. (%s missed due to errors, %s skipped due to no candidate functions)"%(len(pfams_to_process),len(sorted_fams),errors,nqs)
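# Example invocations (illustrative only -- the script name and placeholder
# paths/ids are assumptions; see usage() for the authoritative option list):
#   python sifter_prepare.py -f PF00001,PF00002 <families_data_dir> <output_dir>
#   python sifter_prepare.py -p GENE1,GENE2 --hit_file pfam_hits.txt <families_data_dir> <output_dir>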
| gpl-3.0 | 4,171,967,167,763,337,700 | 41.405917 | 286 | 0.549159 | false |
jnishi/chainer | docs/source/_autosummary_check.py | 6 | 1109 | import inspect
import os
import types
import chainer.functions
import chainer.links
def _is_rst_exists(entity):
return os.path.exists('source/reference/generated/{}.rst'.format(entity))
def check(app, exception):
missing_entities = []
missing_entities += [
name for name in _list_chainer_functions()
if not _is_rst_exists(name)]
missing_entities += [
name for name in _list_chainer_links()
if not _is_rst_exists(name)]
if len(missing_entities) != 0:
app.warn('\n'.join([
'Undocumented entities found.',
'',
] + missing_entities))
def _list_chainer_functions():
# List exported functions under chainer.functions.
return ['chainer.functions.{}'.format(name)
for (name, func) in chainer.functions.__dict__.items()
if isinstance(func, types.FunctionType)]
def _list_chainer_links():
# List exported classes under chainer.links.
return ['chainer.links.{}'.format(name)
for (name, link) in chainer.links.__dict__.items()
if inspect.isclass(link)]
| mit | 4,384,092,957,385,267,700 | 25.404762 | 77 | 0.625789 | false |
eufarn7sp/egads-eufar | egads/thirdparty/quantities/constants/quantum.py | 4 | 5836 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ._utils import _cd
from ..unitquantity import UnitConstant
molar_Planck_constant = UnitConstant(
'molar_Planck_constant',
_cd('molar Planck constant'),
symbol='(N_A*h)',
u_symbol='(N_A·h)'
)
molar_Planck_constant_times_c = UnitConstant(
'molar_Planck_constant_times_c',
_cd('molar Planck constant times c'),
symbol='(N_A*h*c)',
u_symbol='(N_A·h·c)'
)
h = Planck_constant = UnitConstant(
'Planck_constant',
_cd('Planck constant'),
symbol='h'
)
hbar = Planck_constant_over_2_pi = UnitConstant(
'Planck_constant_over_2_pi',
_cd('Planck constant over 2 pi'),
symbol='(h/(2*pi))',
u_symbol='ħ'
)
quantum_of_circulation = UnitConstant(
'quantum_of_circulation',
_cd('quantum of circulation'),
symbol='(h/(2*m_e))',
u_symbol='(h/(2·mₑ))'
)
quantum_of_circulation_times_2 = UnitConstant(
'quantum_of_circulation_times_2',
_cd('quantum of circulation times 2'),
symbol='(h/m_e)',
u_symbol='(h/mₑ)'
)
l_P = Planck_length = UnitConstant(
'Planck_length',
_cd('Planck length'),
symbol='l_P'
)
m_P = Planck_mass = UnitConstant(
'Planck_mass',
_cd('Planck mass'),
symbol='m_P'
)
T_P = Planck_temperature = UnitConstant(
'Planck_temperature',
_cd('Planck temperature'),
symbol='T_P'
)
t_P = Planck_time = UnitConstant(
'Planck_time',
_cd('Planck time'),
symbol='t_P'
)
R_infinity = Rydberg_constant = UnitConstant(
'Rydberg_constant',
_cd('Rydberg constant'),
symbol='R_infinity',
u_symbol='R_∞'
)
Rydberg_constant_times_c_in_Hz = UnitConstant(
'Rydberg_constant_times_c_in_Hz',
_cd('Rydberg constant times c in Hz')
)
Rydberg_constant_times_hc_in_eV = UnitConstant(
'Rydberg_constant_times_hc_in_eV',
_cd('Rydberg constant times hc in eV')
)
Rydberg_constant_times_hc_in_J = UnitConstant(
'Rydberg_constant_times_hc_in_J',
_cd('Rydberg constant times hc in J'),
symbol='(R_infinity*h*c)',
u_symbol='(R_∞·h·c)'
)
G_0 = conductance_quantum = UnitConstant(
'conductance_quantum',
_cd('conductance quantum'),
symbol='G_0',
u_symbol='G₀'
)
K_J90 = conventional_value_of_Josephson_constant = UnitConstant(
'conventional_value_of_Josephson_constant',
_cd('conventional value of Josephson constant')
)
R_K90 = conventional_value_of_von_Klitzing_constant = UnitConstant(
'conventional_value_of_von_Klitzing_constant',
_cd('conventional value of von Klitzing constant')
)
Fermi_coupling_constant = UnitConstant(
'Fermi_coupling_constant',
_cd('Fermi coupling constant'),
symbol='(G_F/(hbar*c)**3)',
u_symbol='(G_F/(ħ·c)³)'
)
alpha = fine_structure_constant = UnitConstant(
'fine_structure_constant',
_cd('fine-structure constant'),
symbol='alpha',
u_symbol='α'
)
inverse_fine_structure_constant = UnitConstant(
'inverse_fine_structure_constant',
_cd('inverse fine-structure constant'),
symbol='alpha**-1',
u_symbol='α⁻¹'
)
c_1 = first_radiation_constant = UnitConstant(
'first_radiation_constant',
_cd('first radiation constant'),
symbol='c_1',
u_symbol='c₁'
)
c_1L = first_radiation_constant_for_spectral_radiance = UnitConstant(
'first_radiation_constant_for_spectral_radiance',
_cd('first radiation constant for spectral radiance'),
symbol='c_1L',
u_symbol='c₁_L'
)
inverse_of_conductance_quantum = UnitConstant(
'inverse_of_conductance_quantum',
_cd('inverse of conductance quantum'),
symbol='G_0**-1',
u_symbol='G₀⁻¹'
)
Josephson_constant = K_J = UnitConstant(
'Josephson_constant',
_cd('Josephson constant'),
symbol='K_J'
)
Phi_0 = magnetic_flux_quantum = UnitConstant(
'magnetic_flux_quantum',
_cd('magnetic flux quantum'),
symbol='Phi_0',
u_symbol='Φ₀'
)
Newtonian_constant_of_gravitation_over_h_bar_c = UnitConstant(
'Newtonian_constant_of_gravitation_over_h_bar_c',
_cd('Newtonian constant of gravitation over h-bar c'),
symbol='(G/(hbar*c))',
u_symbol='(G/(ħ·c))'
)
Sackur_Tetrode_constant_ST_100kPa = UnitConstant(
'Sackur_Tetrode_constant_ST_100kPa',
_cd('Sackur-Tetrode constant (1 K, 100 kPa)')
)
Sackur_Tetrode_constant_STP = UnitConstant(
'Sackur_Tetrode_constant_STP',
_cd('Sackur-Tetrode constant (1 K, 101.325 kPa)')
)
c_2 = second_radiation_constant = UnitConstant(
'second_radiation_constant',
_cd('second radiation constant'),
symbol='c_2',
u_symbol='c₂'
)
sigma = Stefan_Boltzmann_constant = UnitConstant(
'Stefan_Boltzmann_constant',
_cd('Stefan-Boltzmann constant'),
symbol='sigma',
u_symbol='σ'
)
R_K = von_Klitzing_constant = UnitConstant(
'von_Klitzing_constant',
_cd('von Klitzing constant'),
symbol='R_K'
)
b_prime = Wien_frequency_displacement_law_constant = UnitConstant(
'Wien_frequency_displacement_law_constant',
_cd('Wien frequency displacement law constant'),
symbol='bprime',
u_symbol='b′'
)
b = Wien_wavelength_displacement_law_constant = UnitConstant(
'Wien_wavelength_displacement_law_constant',
_cd('Wien wavelength displacement law constant'),
symbol='b'
)
Planck_constant_in_eV_s = UnitConstant(
'Planck_constant_in_eV_s',
_cd('Planck constant in eV s')
)
Planck_constant_over_2_pi_in_eV_s = UnitConstant(
'Planck_constant_over_2_pi_in_eV_s',
_cd('Planck constant over 2 pi in eV s')
)
Planck_constant_over_2_pi_times_c_in_MeV_fm = UnitConstant(
'Planck_constant_over_2_pi_times_c_in_MeV_fm',
_cd('Planck constant over 2 pi times c in MeV fm')
)
Planck_mass_energy_equivalent_in_GeV = UnitConstant(
'Planck_mass_energy_equivalent_in_GeV',
_cd('Planck mass energy equivalent in GeV')
)
del UnitConstant, _cd
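# Usage sketch (illustrative, not part of the original module): a
# UnitConstant behaves like any other quantities object, so the constants
# defined above can be combined arithmetically and simplified, e.g.
#
#   >>> from quantities.constants.quantum import h, R_infinity
#   >>> (h * R_infinity).simplified    # doctest: +SKIP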
| bsd-3-clause | -8,252,205,652,304,337,000 | 26.712919 | 69 | 0.659876 | false |
jCliveStewart/robotframework-DebugUI | build/lib/DebugUiLibrary/DebugUI.py | 2 | 16365 | #!/usr/bin/python
# ---------------------------------------------------------------------------------------------------
# Copyright 2015 UCAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DebugUI.py
# Debugger user interface to try commands for robotFramework scripts
# ---------------------------------------------------------------------------------------------------
import Tkinter as tk
listHeight=30
listWidth=100
import tkMessageBox
commandComboLabel='EXAMPLE commands - Click any command to use it'
historyComboLabel='HISTORY of commands tried - Click any command to use it'
programComboLabel='SAVED commands - these are now on the clipboard ready to use'
varsComboLabel ='VARIABLES - robotFramework variables and their values'
helpText ='''
RobotFramework Script Debug UI:
The entryfield near the top takes commands to try.
You can edit or type into it or select commands from the page below.
Three commands are on buttons on the right and in the 'menu' at the left:
Try - tries out whatever command you have in the entryfield.
Save - saves the current command to the saved page
Exit - quits the debugger and continues your program with the next line after the 'Debug' line
Three views are available depending on which menu item you click:
Commands - shows a list of suggested available commands.
History - shows a list of all commands you try.
Saved - shows a list of commands you previously saved.
To use the Debug UI :
Click on any command to put it into the entryfield. Edit the command, then try it by clicking the Try button or menu item.
Double click will load and execute in one go if you are feeling lucky.
After a command is executed the list of available commands is refreshed. This may take some time where there are lots of controls.
When a command works click save to put it on the saved page
You can step through several commands and save them.
Go to the saved page to get the commands into the paste buffer.
    Paste them into your robotframework script (you need to do this before exiting)
When you have finished debugging click exit and robotframework resumes running your testcase on the line after 'Debug'.
    You can interact with the browser between tries to get the page ready or back to its original state.
You may need to experiment with editing the command to make it work correctly.
When you open the 'Saved' page the list of saved commands is put on the clipboard ready to paste into your script.
You can use the debugger alongside checkXpath and firebug. It has been tested with firefox and Chrome.
Check the robotFramework log in RIDE to see the current status.
You will get far faster response time if you use Chrome ...
    If you want to add more controls to search for, edit controlsList.py
Good luck !!!
'''
class DebugUI:
def __init__(self,rfInterface):
self.savedCommands=[]
self.rfInterface=rfInterface
self.root = tk.Tk()
self.root.title('RobotFramework DebugUI')
self.root.bind('<Return>', self.tryCommand) # Put 'focus' on the try command button
# Add a menu to select the different history lists - afraid it has to go at the top as far as I can see
menubar = tk.Menu(self.root)
menubar.add_command(label="Try", command=self.tryCommand)
menubar.add_command(label="Save", command=self.saveCommand)
menubar.add_command(label="Exit", command=self.fini)
menubar.add_command(label="||")
menubar.add_command(label="Commands", command=self.showCommands)
menubar.add_command(label="History", command=self.showHistory)
menubar.add_command(label="Saved", command=self.showProgram)
menubar.add_command(label="||")
menubar.add_command(label="Variables", command=self.showVars)
menubar.add_command(label="||")
menubar.add_command(label="Help", command=self.showHelp)
self.root.config(menu=menubar)
# A button panel at the top
buttonPanel = tk.Frame(self.root)
buttonPanel.pack(side=tk.TOP) # Add the buttons panel to the UI
# Add an entry field for rf commands
self.commandEntryfield=tk.Entry(buttonPanel, width=listWidth)
self.commandEntryValue=tk.StringVar()
self.commandEntryfield["textvariable"]=self.commandEntryValue
self.commandEntryValue.set('Click link //a[contains(text(),"YOUR_TEXT_HERE")]')
self.commandEntryfield.pack(side=tk.LEFT)
        self.commandEntryfield.bind('<Double-Button-1>',self.tryCommand) # Double click tries the command
# Add a Try command button to the panel
PB1 = tk.Button(buttonPanel, text ="Try", command = self.tryCommand, bg="GREEN")
PB1.pack(side=tk.LEFT)
PB1.focus_set() # Set focus on the try command button
# Add a save command button to the panel
PB2 = tk.Button(buttonPanel, text ="Save", command = self.saveCommand, bg="Yellow")
PB2.pack(side=tk.LEFT)
# Add an exit button to the panel
EB = tk.Button(buttonPanel, text ="Exit", command=self.fini, bg="Red")
EB.pack(side=tk.LEFT)
# ---------- Add a combo panel for commands ----------
self.comboPanel=tk.Frame(self.root)
self.comboPanel.pack(side=tk.BOTTOM, expand=tk.YES) # Add the combo panel to the UI
# Add a label to it
self.comboLabelText=tk.StringVar()
self.comboLabelText.set(commandComboLabel)
comboLabel=tk.Label(self.comboPanel, textvariable=self.comboLabelText)
comboLabel.pack(side=tk.TOP, expand=tk.YES)
# ---------- Add a combo panel for history - not packed initially ----------
self.historyCombo=tk.Listbox(self.comboPanel,width=listWidth,height=listHeight)
self.historyCombo.bind('<Double-Button-1>',self.listSelect) # Make double clicks run the list select function
# ---------- Add a combo panel for variables ----------
self.varsScroll = tk.Scrollbar(self.comboPanel)
self.varsCombo = tk.Listbox(self.comboPanel, yscrollcommand=self.varsScroll.set, width=listWidth, height=listHeight)
self.varsScroll.config(command=self.varsCombo.yview)
self.varsCombo.bind('<Double-Button-1>',self.listSelect) # Make double clicks run the list select function
# ---------- Add a list of stored program steps ----------
self.programList=tk.StringVar() # Create a variable to hold the program steps
        # Displayed program list
self.progScroll = tk.Scrollbar(self.comboPanel)
self.programCombo = tk.Text(self.comboPanel, yscrollcommand=self.progScroll.set, height=listHeight, borderwidth=0)
self.progScroll.config(command=self.programCombo.yview)
self.programCombo.configure(bg=self.root.cget('bg'), relief=tk.FLAT)
self.programCombo.configure(state="disabled") # Make the program combo non editable
# ---------- Make text selectable in the program panel ----------
self.programCombo.bind('<Double-Button-1>',self.select_all)
self.programCombo.bind("<Control-Key-a>", self.select_all)
self.programCombo.bind("<Control-Key-A>", self.select_all) # just in case caps lock is on
# Add a listbox with the commands in
self.commandScroll = tk.Scrollbar(self.comboPanel)
self.commandCombo=tk.Listbox(self.comboPanel, selectmode=tk.BROWSE, yscrollcommand=self.commandScroll.set, width=listWidth, height=listHeight)
self.commandScroll.config(command=self.commandCombo.yview)
self.updateCommandsList() # Put the available commands into the commands list - can take ages
self.commandScroll.pack(side=tk.RIGHT, fill=tk.BOTH, expand=tk.YES) # Make this one show on startup
self.commandCombo.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.commandCombo.bind('<<ListboxSelect>>',self.listSelect) # Clicks select from the list
self.commandCombo.bind('<Double-Button-1>',self.listSelectRun) # Double clicks select from the list and run the command
self.root.mainloop() # This sits in this loop forever running the UI until you exit
# ---------------- Command entryfield functions ----------------
def select_all(self,event): # Select all the text in textbox
self.programCombo.tag_add("sel", "1.0", "end")
self.programCombo.mark_set("insert", "1.0")
self.programCombo.see("insert")
return 'break'
def updateCommandsList(self): # Refresh the contents of the list of commands available
self.root.config(cursor="watch") # Show an hourglass while selenium reads the controls
self.root.update()
commandsList=self.rfInterface.getAllPageControls()
self.commandCombo.delete(0,tk.END)
for command in commandsList:
self.commandCombo.insert(tk.END,command)
self.commandCombo.selection_set(first=0)
self.root.config(cursor="")
def tryCommand(self,event=None): # (Safely) try out the command from the entry field
# The event parameter because clicking the enter key passes in an enter event - which we ignore
self.command=self.commandEntryfield.get()
self.commands=self.command.split(' ')
self.commands=[c.strip() for c in self.commands if c!='']
self.rfInterface.runCommand(tuple(self.commands))
self.historyCombo.insert(0,' '.join(self.commands))
self.updateCommandsList() # Put the available commands into the commands list
def saveCommand(self): # Save the entry field command in saved commands
command=self.commandEntryfield.get()
self.savedCommands.append(command)
self.programList.set("\n".join(self.savedCommands))
def listSelect(self,event): # Select an item from a list - put it into the entryfield
widget = event.widget
selection=widget.curselection()
# Timing problems when people click around
# should really poll but that seems like overkill ...
if len(selection)>0:
value = widget.get(selection[0])
else:
value='Log Drat - click it again...'
self.commandEntryValue.set(value)
def listSelectRun(self,event): # Select and run an item from a list
self.listSelect(event) # put it into the entryfield
self.tryCommand() # Try the command
# ---------------- Show contents ----------------
def clearView(self,label): # Clear the panel and set the label
self.programCombo.pack_forget()
self.progScroll.pack_forget()
self.commandCombo.pack_forget()
self.commandScroll.pack_forget()
self.varsCombo.pack_forget()
self.varsScroll.pack_forget()
self.historyCombo.pack_forget()
self.comboLabelText.set(label)
def showCommands(self): # Show the available commands
self.clearView(commandComboLabel)
self.commandScroll.pack(side=tk.RIGHT, fill=tk.BOTH, expand=tk.YES) # Add the scrollbar
self.commandCombo.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES) # Add the commands
def showProgram(self): # Show the saved program
self.clearView(programComboLabel)
progText=self.programList.get() # Set the text - ie list of steps
self.programCombo.configure(state="normal")
self.programCombo.delete(1.0, "end")
self.programCombo.insert(1.0,progText)
self.programCombo.configure(state="disabled")
self.root.clipboard_clear() # Put the commands on the clipboard automagically
self.root.clipboard_append(progText) # when the page is opened - (probably not sensible)
self.progScroll.pack(side=tk.RIGHT, fill=tk.BOTH, expand=tk.YES) # Add the scroll bar
self.programCombo.pack(side=tk.LEFT, expand=tk.YES) # Show the program steps list
def showVars(self): # Show the list of available variables
varsDict=self.rfInterface.getVars() # Get the values
varsList=dictAsList(varsDict) # Convert to a displayable list
self.varsCombo.delete(0, tk.END) # Add the values to the control
for v in varsList:
self.varsCombo.insert(tk.END, v)
self.clearView(varsComboLabel) # Show the list
self.varsScroll.pack(side=tk.RIGHT, fill=tk.BOTH, expand=tk.YES) # Add the scrollbar
self.varsCombo.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES) # Show the list
def showHistory(self): # Show the command history
self.clearView(historyComboLabel)
self.historyCombo.pack(side=tk.BOTTOM, expand=tk.YES)
def showHelp(self): # Show the help text
self.clearView(helpText)
def fini(self): # Quit the program
print "KTHXBYE"
self.root.destroy()
return
# Convert a dictionary into a list of strings to display
def dictAsList(varsDict):
varsList=[]
for key in varsDict.keys():
varsList.append(str(key)+' '+str(varsDict[key]))
return varsList
# ------------------
# Testing for this code
# ------------------
class FakeRfInterface:
def getVars(self):
varsDict={1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,
}
return varsDict
def runCommand(self,args):
return
def getAllPageControls(self):
return []
if __name__ == '__main__':
print "TESTING"
fakeRfInterface=FakeRfInterface()
app = DebugUI(fakeRfInterface)
#DebugUI.showVars()
# -------- End of file --------
| apache-2.0 | -552,915,443,083,632,600 | 52.136364 | 158 | 0.575252 | false |
btnpushnmunky/pygallerycreator | pygallerycreator/bootstrap_html.py | 1 | 3883 | html = """
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Gallery</title>
<meta name="description" content="The HTML5 Herald">
<meta name="author" content="SitePoint">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha384-TX8t27EcRE3e/ihU7zmQxVncDAy5uIKz4rEkgIXeMed4M0jlfIDPvg6uqKI2xXr2" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-ho+j7jyWK8fNQe+A12Hb8AhRq26LrZ/JpcUGGOn+Y7RsweNrtN/tE3MoK7ZeZDyx" crossorigin="anonymous"></script>
<link rel="stylesheet" href="bootstrap-gallery/styles.css">
<script src="js/lazysizes.min.js" async=""></script>
</head>
<!-- Gallery -->
<!--
Gallery is linked to lightbox using data attributes.
To open lightbox, this is added to the gallery element: {data-toggle="modal" data-target="#exampleModal"}.
To open carousel on correct image, this is added to each image element: {data-target="#carouselExample" data-slide-to="0"}.
Replace '0' with corresponding slide number.
-->
<body>
<div class="row" id="gallery" data-toggle="modal" data-target="#exampleModal">
{% for image in images %}
<div class="col-3 col-lg-2">
<img class="w-100 lazyload" data-src="thumbs/{{image}}" alt="First slide" data-target="#carouselExample" data-slide-to="{{images.index(image)}}">
</div>
{% endfor %}
</div>
<!-- Modal -->
<!--
This part is straight out of Bootstrap docs. Just a carousel inside a modal.
-->
<div class="modal fade" id="exampleModal" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<div id="carouselExample" class="carousel slide" data-ride="carousel">
<!-- <ol class="carousel-indicators">
<li data-target="#carouselExample" data-slide-to="0" class="active"></li>
{% for image in images %}
<li data-target="#carouselExample" data-slide-to="{{images.index(image)}}"></li>
{% endfor %}
</ol> -->
<div class="carousel-inner">
<div class="carousel-item active">
<img class="d-block w-100" src="large_imgs/{{images[0]}}" alt="First slide">
<div class="carousel-caption d-none d-md-block">
<p>{{images[0]}}</p>
</div>
</div>
{% for image in images[1:] %}
<div class="carousel-item">
<img class="d-block w-100 lazyload" data-src="large_imgs/{{image}}" alt="Second slide">
<div class="carousel-caption d-none d-md-block">
<p>{{image}}</p>
</div>
</div>
{% endfor %}
</div>
<a class="carousel-control-prev" href="#carouselExample" role="button" data-slide="prev">
<span class="carousel-control-prev-icon" aria-hidden="true"></span>
<span class="sr-only">Previous</span>
</a>
<a class="carousel-control-next" href="#carouselExample" role="button" data-slide="next">
<span class="carousel-control-next-icon" aria-hidden="true"></span>
<span class="sr-only">Next</span>
</a>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</body>
</html>
""" | mit | -571,087,539,570,992,900 | 40.763441 | 212 | 0.617306 | false |
bierschenk/ode | ode/integrators.py | 1 | 8839 | # -*- coding: utf-8 -*-
# integrators
__all__ = [
'Euler', 'euler',
'Verlet', 'verlet',
'BackwardEuler', 'backwardeuler',
]
import numpy as np
class Integrator:
'''Defines reusable attribute settors where checking is needed'''
def setdfun(self, dfun):
'''check output of dfun, wrap w/ np.array if necessary'''
xtest = dfun(self.time, self.x)
if not isinstance(xtest, np.ndarray):
def array_dfun(t, x):
x = dfun(t, x)
xarray = np.array(x)
return xarray
self.dfun = array_dfun
else:
self.dfun = dfun
def __iter__(self):
return self
class ConstantTimestep(Integrator):
'''The __init__ function of this class sets instance variables for
integrators with a constant timestep.'''
def __init__(self, dfun, xzero, timerange, timestep):
assert len(timerange) == 2
self.timestart, self.timeend = timerange
self.time = self.timestart
        assert ((self.timeend - self.timestart) / timestep) > 0, (
            "'timerange' and 'timestep' are not consistent. "
            "Check signs and order.")
if not isinstance(xzero, np.ndarray):
xzero = np.array(xzero)
self.x = xzero
self.stepcounter = 0
self.timestep = timestep
self.direction = np.sign(timestep)
self.steps = np.ceil((self.timeend - self.timestart) / timestep)
super().setdfun(dfun)
self.status = 'initialized'
class Euler(ConstantTimestep):
    r'''Euler method integration. This class implements a generator.
:param dfun:
derivative function of the system.
The differential system arranged as a series of first-order
equations: :math:`\dot{X} = \mathrm{dfun}(t, x)`.
Returns :math:`\dot{X}` should be a single dimensional array
or list.
:param xzero:
the initial condition of the system
:param timerange:
the start and end times as (starttime, endtime) tuple/list/array.
:param timestep:
the timestep
:returns: t, x for each iteration. t is a number. x is an array.
'''
def __next__(self):
if self.stepcounter < self.steps:
if self.status == 'initialized':
self.status = 'running'
return self.time, self.x
else:
self.stepcounter += 1
dx = self.dfun(self.time, self.x)
self.time, self.x = (
self.timestart + (self.stepcounter * self.timestep),
self.x + (self.timestep * dx))
return self.time, self.x
else:
self.status = 'finished'
raise StopIteration
def euler(dfun, xzero, timerange, timestep):
'''Euler method integration. This function wraps the Euler class.
:param All: All parameters are identical to the Euler class above.
:returns: t, x as arrays.
'''
t_column, X = zip(*list(Euler(dfun, xzero, timerange, timestep)))
t_column = np.array(t_column)
X_columns = np.vstack(X).T
return t_column, X_columns
class Verlet(ConstantTimestep):
    r'''Verlet method integration. This class implements a generator.
:param ddfun:
second derivative function of the system.
The differential system arranged as a series of second-order
equations: :math:`\ddot{X} = \mathrm{dfun}(t, x)`
:param xzero:
the initial condition of the system
:param vzero:
the initial condition of first derivative of the system
:param timerange:
the start and end times as (starttime, endtime)
:param timestep:
the timestep
:returns: t, x, v for each iteration.
'''
def __init__(self, ddfun, xzero, vzero, timerange, timestep):
if not isinstance(vzero, np.ndarray):
vzero = np.array(vzero)
assert len(vzero.shape) == 1, 'vzero must be one dimensional'
self.v = vzero
if not isinstance(xzero, np.ndarray):
xzero = np.array(xzero)
assert len(xzero.shape) == 1, 'xzero must be one dimensional'
self.xold = xzero
super().__init__(ddfun, xzero, timerange, timestep)
def __next__(self):
if self.stepcounter < self.steps:
if self.status == 'initialized':
ddx = self.dfun(self.time, self.x)
self.xnext = (
self.x
+ (self.v * self.timestep)
+ (ddx * (self.timestep**2) / 2))
self.status = 'running'
return self.time, self.x, self.v
else:
self.stepcounter += 1
ddx = self.dfun(self.time + self.timestep, self.xnext)
self.time, self.xold, self.x, self.xnext = [
self.timestart + (self.stepcounter * self.timestep),
self.x,
self.xnext,
(2 * self.xnext) - self.x + (ddx * (self.timestep**2))]
self.v = (self.xnext - self.xold) / (2 * self.timestep)
return self.time, self.x, self.v
else:
self.status = 'finished'
raise StopIteration
def verlet(ddfun, xzero, vzero, timerange, timestep):
'''Verlet method integration. This function wraps the Verlet class.
:param All: All parameters are identical to the Verlet class above.
:returns: t, x, v as arrays.
'''
t_column, X, V = zip(*list(Verlet(ddfun, xzero, vzero, timerange, timestep)))
t_column = np.array(t_column)
X_columns = np.vstack(X).T
V_columns = np.vstack(V).T
return t_column, X_columns, V_columns
class BackwardEuler(ConstantTimestep):
    r'''Backward Euler method integration. This class implements a generator.
:param dfun:
Derivative function of the system.
The differential system arranged as a series of first-order
equations: :math:`\dot{X} = \mathrm{dfun}(t, x)`
:param xzero:
The initial condition of the system.
:param vzero:
The initial condition of first derivative of the system.
:param timerange:
The start and end times as (starttime, endtime).
:param timestep:
The timestep.
:param convergencethreshold:
Each step requires an iterative solution of an implicit equation.
This is the threshold of convergence.
:param maxiterations:
Maximum iterations of the implicit equation before raising
an exception.
:returns: t, x for each iteration.
'''
def __init__(self, dfun, xzero, timerange, timestep,
convergencethreshold=0.0000000001, maxiterations=1000):
assert convergencethreshold > 0, 'convergencethreshold must be > 0'
self.convergencethreshold = convergencethreshold
assert maxiterations > 0, 'maxiterations must be > 0'
self.maxiterations = maxiterations
super().__init__(dfun, xzero, timerange, timestep)
def __next__(self):
if self.stepcounter < self.steps:
if self.status == 'initialized':
self.status = 'running'
return self.time, self.x
else:
self.stepcounter += 1
iterations = 0
error = 1 + self.convergencethreshold
xn1 = self.x
while (
(error >= self.convergencethreshold) and
(iterations < self.maxiterations)):
iterations += 1
xn2 = self.x + (self.dfun(self.time, xn1) * self.timestep)
error = sum(abs(xn1 - xn2))
xn1 = xn2
if error <= self.convergencethreshold:
self.time, self.x = (
self.timestart + (self.stepcounter * self.timestep),
xn1)
return self.time, self.x
else:
raise RuntimeError('maximum iterations exceeded')
else:
self.status = 'finished'
raise StopIteration
def backwardeuler(dfun, xzero, timerange, timestep):
'''Backward Euler method integration. This function wraps BackwardEuler.
:param All: All parameters are identical to the BackwardEuler
class above.
:returns: t, x as arrays.
'''
t_column, X = zip(*list(BackwardEuler(dfun, xzero, timerange, timestep)))
t_column = np.array(t_column)
X_columns = np.vstack(X).T
return t_column, X_columns
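# --- Usage sketch (added for illustration; not part of the original API) ---
# Exercises the three wrapper functions on problems with known closed-form
# solutions: exponential decay xdot = -x for the first-order methods and a
# harmonic oscillator xddot = -x for Verlet.
if __name__ == '__main__':
    t, x = euler(lambda t, x: -x, [1.0], (0.0, 1.0), 0.001)
    print('euler:          x(1) =', x[0][-1])   # analytic value: exp(-1) ~ 0.36788
    t, x = backwardeuler(lambda t, x: -x, [1.0], (0.0, 1.0), 0.001)
    print('backward euler: x(1) =', x[0][-1])   # also approaches exp(-1)
    t, x, v = verlet(lambda t, x: -x, [1.0], [0.0], (0.0, 1.0), 0.001)
    print('verlet:         x(1) =', x[0][-1])   # analytic value: cos(1) ~ 0.54030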
| mit | -9,197,420,163,542,815,000 | 36.138655 | 81 | 0.559679 | false |
imoseyon/leanKernel-d2usc-deprecated | external/webkit/Tools/Scripts/webkitpy/layout_tests/port/chromium.py | 15 | 22631 | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Chromium implementations of the Port interface."""
import errno
import logging
import re
import signal
import subprocess
import sys
import time
import webbrowser
from webkitpy.common.system import executive
from webkitpy.common.system.path import cygpath
from webkitpy.layout_tests.layout_package import test_expectations
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import http_server
from webkitpy.layout_tests.port import websocket_server
_log = logging.getLogger("webkitpy.layout_tests.port.chromium")
# FIXME: This function doesn't belong in this package.
class ChromiumPort(base.Port):
"""Abstract base class for Chromium implementations of the Port class."""
ALL_BASELINE_VARIANTS = [
'chromium-mac-snowleopard', 'chromium-mac-leopard',
'chromium-win-win7', 'chromium-win-vista', 'chromium-win-xp',
'chromium-linux-x86', 'chromium-linux-x86_64',
'chromium-gpu-mac-snowleopard', 'chromium-gpu-win-win7', 'chromium-gpu-linux-x86_64',
]
def __init__(self, **kwargs):
base.Port.__init__(self, **kwargs)
self._chromium_base_dir = None
def _check_file_exists(self, path_to_file, file_description,
override_step=None, logging=True):
"""Verify the file is present where expected or log an error.
Args:
            file_description: The (human friendly) name or description of the
              file you're looking for (e.g., "HTTP Server"). Used for error logging.
override_step: An optional string to be logged if the check fails.
logging: Whether or not log the error messages."""
if not self._filesystem.exists(path_to_file):
if logging:
_log.error('Unable to find %s' % file_description)
_log.error(' at %s' % path_to_file)
if override_step:
_log.error(' %s' % override_step)
_log.error('')
return False
return True
def baseline_path(self):
return self._webkit_baseline_path(self._name)
def check_build(self, needs_http):
result = True
dump_render_tree_binary_path = self._path_to_driver()
result = self._check_file_exists(dump_render_tree_binary_path,
'test driver') and result
if result and self.get_option('build'):
result = self._check_driver_build_up_to_date(
self.get_option('configuration'))
else:
_log.error('')
helper_path = self._path_to_helper()
if helper_path:
result = self._check_file_exists(helper_path,
'layout test helper') and result
if self.get_option('pixel_tests'):
result = self.check_image_diff(
'To override, invoke with --no-pixel-tests') and result
# It's okay if pretty patch isn't available, but we will at
# least log a message.
self._pretty_patch_available = self.check_pretty_patch()
return result
def check_sys_deps(self, needs_http):
cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
local_error = executive.ScriptError()
def error_handler(script_error):
local_error.exit_code = script_error.exit_code
output = self._executive.run_command(cmd, error_handler=error_handler)
if local_error.exit_code:
_log.error('System dependencies check failed.')
_log.error('To override, invoke with --nocheck-sys-deps')
_log.error('')
_log.error(output)
return False
return True
def check_image_diff(self, override_step=None, logging=True):
image_diff_path = self._path_to_image_diff()
return self._check_file_exists(image_diff_path, 'image diff exe',
override_step, logging)
def diff_image(self, expected_contents, actual_contents,
diff_filename=None):
# FIXME: need unit tests for this.
if not actual_contents and not expected_contents:
return False
if not actual_contents or not expected_contents:
return True
tempdir = self._filesystem.mkdtemp()
expected_filename = self._filesystem.join(str(tempdir), "expected.png")
self._filesystem.write_binary_file(expected_filename, expected_contents)
actual_filename = self._filesystem.join(str(tempdir), "actual.png")
self._filesystem.write_binary_file(actual_filename, actual_contents)
executable = self._path_to_image_diff()
if diff_filename:
cmd = [executable, '--diff', expected_filename,
actual_filename, diff_filename]
else:
cmd = [executable, expected_filename, actual_filename]
result = True
try:
exit_code = self._executive.run_command(cmd, return_exit_code=True)
if exit_code == 0:
# The images are the same.
result = False
elif exit_code != 1:
_log.error("image diff returned an exit code of "
+ str(exit_code))
# Returning False here causes the script to think that we
# successfully created the diff even though we didn't. If
# we return True, we think that the images match but the hashes
# don't match.
# FIXME: Figure out why image_diff returns other values.
result = False
except OSError, e:
if e.errno == errno.ENOENT or e.errno == errno.EACCES:
_compare_available = False
else:
raise e
finally:
self._filesystem.rmtree(str(tempdir))
return result
def driver_name(self):
return "DumpRenderTree"
def path_from_chromium_base(self, *comps):
"""Returns the full path to path made by joining the top of the
Chromium source tree and the list of path components in |*comps|."""
if not self._chromium_base_dir:
abspath = self._filesystem.abspath(__file__)
offset = abspath.find('third_party')
if offset == -1:
self._chromium_base_dir = self._filesystem.join(
abspath[0:abspath.find('Tools')],
'Source', 'WebKit', 'chromium')
else:
self._chromium_base_dir = abspath[0:offset]
return self._filesystem.join(self._chromium_base_dir, *comps)
def path_to_test_expectations_file(self):
return self.path_from_webkit_base('LayoutTests', 'platform',
'chromium', 'test_expectations.txt')
def default_results_directory(self):
try:
return self.path_from_chromium_base('webkit',
self.get_option('configuration'),
'layout-test-results')
except AssertionError:
return self._build_path(self.get_option('configuration'),
'layout-test-results')
def setup_test_run(self):
# Delete the disk cache if any to ensure a clean test run.
dump_render_tree_binary_path = self._path_to_driver()
cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
cachedir = self._filesystem.join(cachedir, "cache")
if self._filesystem.exists(cachedir):
self._filesystem.rmtree(cachedir)
def create_driver(self, worker_number):
"""Starts a new Driver and returns a handle to it."""
return ChromiumDriver(self, worker_number)
def start_helper(self):
helper_path = self._path_to_helper()
if helper_path:
_log.debug("Starting layout helper %s" % helper_path)
# Note: Not thread safe: http://bugs.python.org/issue2320
self._helper = subprocess.Popen([helper_path],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
if not is_ready.startswith('ready'):
_log.error("layout_test_helper failed to be ready")
def stop_helper(self):
if self._helper:
_log.debug("Stopping layout test helper")
self._helper.stdin.write("x\n")
self._helper.stdin.close()
# wait() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
self._helper.wait()
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def test_expectations(self):
"""Returns the test expectations for this port.
Basically this string should contain the equivalent of a
test_expectations file. See test_expectations.py for more details."""
expectations_path = self.path_to_test_expectations_file()
return self._filesystem.read_text_file(expectations_path)
def test_expectations_overrides(self):
try:
overrides_path = self.path_from_chromium_base('webkit', 'tools',
'layout_tests', 'test_expectations.txt')
except AssertionError:
return None
if not self._filesystem.exists(overrides_path):
return None
return self._filesystem.read_text_file(overrides_path)
def skipped_layout_tests(self, extra_test_files=None):
expectations_str = self.test_expectations()
overrides_str = self.test_expectations_overrides()
is_debug_mode = False
all_test_files = self.tests([])
if extra_test_files:
all_test_files.update(extra_test_files)
expectations = test_expectations.TestExpectations(
self, all_test_files, expectations_str, self.test_configuration(),
is_lint_mode=False, overrides=overrides_str)
tests_dir = self.layout_tests_dir()
return [self.relative_test_filename(test)
for test in expectations.get_tests_with_result_type(test_expectations.SKIP)]
def test_repository_paths(self):
# Note: for JSON file's backward-compatibility we use 'chrome' rather
# than 'chromium' here.
repos = super(ChromiumPort, self).test_repository_paths()
repos.append(('chrome', self.path_from_chromium_base()))
return repos
#
# PROTECTED METHODS
#
# These routines should only be called by other methods in this file
# or any subclasses.
#
def _check_driver_build_up_to_date(self, configuration):
if configuration in ('Debug', 'Release'):
try:
debug_path = self._path_to_driver('Debug')
release_path = self._path_to_driver('Release')
debug_mtime = self._filesystem.mtime(debug_path)
release_mtime = self._filesystem.mtime(release_path)
if (debug_mtime > release_mtime and configuration == 'Release' or
release_mtime > debug_mtime and configuration == 'Debug'):
_log.warning('You are not running the most '
'recent DumpRenderTree binary. You need to '
'pass --debug or not to select between '
'Debug and Release.')
_log.warning('')
# This will fail if we don't have both a debug and release binary.
# That's fine because, in this case, we must already be running the
# most up-to-date one.
except OSError:
pass
return True
def _chromium_baseline_path(self, platform):
if platform is None:
platform = self.name()
return self.path_from_webkit_base('LayoutTests', 'platform', platform)
def _convert_path(self, path):
"""Handles filename conversion for subprocess command line args."""
# See note above in diff_image() for why we need this.
if sys.platform == 'cygwin':
return cygpath(path)
return path
def _path_to_image_diff(self):
binary_name = 'ImageDiff'
return self._build_path(self.get_option('configuration'), binary_name)
class ChromiumDriver(base.Driver):
"""Abstract interface for DRT."""
def __init__(self, port, worker_number):
self._port = port
self._worker_number = worker_number
self._image_path = None
self.KILL_TIMEOUT = 3.0
if self._port.get_option('pixel_tests'):
self._image_path = self._port._filesystem.join(self._port.results_directory(),
'png_result%s.png' % self._worker_number)
def cmd_line(self):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
if self._port.get_option('pixel_tests'):
# See note above in diff_image() for why we need _convert_path().
cmd.append("--pixel-tests=" +
self._port._convert_path(self._image_path))
cmd.append('--test-shell')
if self._port.get_option('startup_dialog'):
cmd.append('--testshell-startup-dialog')
if self._port.get_option('gp_fault_error_box'):
cmd.append('--gp-fault-error-box')
if self._port.get_option('js_flags') is not None:
cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"')
if self._port.get_option('stress_opt'):
cmd.append('--stress-opt')
if self._port.get_option('stress_deopt'):
cmd.append('--stress-deopt')
if self._port.get_option('accelerated_compositing'):
cmd.append('--enable-accelerated-compositing')
if self._port.get_option('accelerated_2d_canvas'):
cmd.append('--enable-accelerated-2d-canvas')
if self._port.get_option('enable_hardware_gpu'):
cmd.append('--enable-hardware-gpu')
cmd.extend(self._port.get_option('additional_drt_flag', []))
return cmd
def start(self):
# FIXME: Should be an error to call this method twice.
cmd = self.cmd_line()
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
# multiple threads). See http://bugs.python.org/issue2320 .
# Note that close_fds isn't supported on Windows, but this bug only
# shows up on Mac and Linux.
close_flag = sys.platform not in ('win32', 'cygwin')
self._proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=close_flag)
def poll(self):
# poll() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
return self._proc.poll()
def _write_command_and_read_line(self, input=None):
"""Returns a tuple: (line, did_crash)"""
try:
if input:
if isinstance(input, unicode):
# DRT expects utf-8
input = input.encode("utf-8")
self._proc.stdin.write(input)
# DumpRenderTree text output is always UTF-8. However some tests
# (e.g. webarchive) may spit out binary data instead of text so we
# don't bother to decode the output.
line = self._proc.stdout.readline()
# We could assert() here that line correctly decodes as UTF-8.
return (line, False)
except IOError, e:
_log.error("IOError communicating w/ DRT: " + str(e))
return (None, True)
def _test_shell_command(self, uri, timeoutms, checksum):
cmd = uri
if timeoutms:
cmd += ' ' + str(timeoutms)
if checksum:
cmd += ' ' + checksum
cmd += "\n"
return cmd
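    # For reference (illustrative example, derived directly from the code
    # above): a command written to DRT's --test-shell stdin looks like
    #   file:///.../fast/example.html 6000 <md5-checksum>\n
    # where the timeout (in ms) and the expected pixel checksum are optional.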
def _output_image(self):
"""Returns the image output which driver generated."""
png_path = self._image_path
if png_path and self._port._filesystem.exists(png_path):
return self._port._filesystem.read_binary_file(png_path)
else:
return None
def _output_image_with_retry(self):
# Retry a few more times because open() sometimes fails on Windows,
# raising "IOError: [Errno 13] Permission denied:"
retry_num = 50
timeout_seconds = 5.0
for i in range(retry_num):
try:
return self._output_image()
except IOError, e:
if e.errno == errno.EACCES:
time.sleep(timeout_seconds / retry_num)
else:
raise e
return self._output_image()
def _clear_output_image(self):
png_path = self._image_path
if png_path and self._port._filesystem.exists(png_path):
self._port._filesystem.remove(png_path)
def run_test(self, driver_input):
output = []
error = []
crash = False
timeout = False
actual_uri = None
actual_checksum = None
self._clear_output_image()
start_time = time.time()
uri = self._port.filename_to_uri(driver_input.filename)
cmd = self._test_shell_command(uri, driver_input.timeout,
driver_input.image_hash)
(line, crash) = self._write_command_and_read_line(input=cmd)
while not crash and line.rstrip() != "#EOF":
# Make sure we haven't crashed.
if line == '' and self.poll() is not None:
# This is hex code 0xc000001d, which is used for abrupt
# termination. This happens if we hit ctrl+c from the prompt
# and we happen to be waiting on DRT.
# sdoyon: Not sure for which OS and in what circumstances the
# above code is valid. What works for me under Linux to detect
# ctrl+c is for the subprocess returncode to be negative
# SIGINT. And that agrees with the subprocess documentation.
if (-1073741510 == self._proc.returncode or
- signal.SIGINT == self._proc.returncode):
raise KeyboardInterrupt
crash = True
break
# Don't include #URL lines in our output
if line.startswith("#URL:"):
actual_uri = line.rstrip()[5:]
if uri != actual_uri:
# GURL capitalizes the drive letter of a file URL.
if (not re.search("^file:///[a-z]:", uri) or
uri.lower() != actual_uri.lower()):
_log.fatal("Test got out of sync:\n|%s|\n|%s|" %
(uri, actual_uri))
raise AssertionError("test out of sync")
elif line.startswith("#MD5:"):
actual_checksum = line.rstrip()[5:]
elif line.startswith("#TEST_TIMED_OUT"):
timeout = True
# Test timed out, but we still need to read until #EOF.
elif actual_uri:
output.append(line)
else:
error.append(line)
(line, crash) = self._write_command_and_read_line(input=None)
# FIXME: Add support for audio when we're ready.
run_time = time.time() - start_time
output_image = self._output_image_with_retry()
text = ''.join(output)
if not text:
text = None
return base.DriverOutput(text, output_image, actual_checksum, audio=None,
crash=crash, test_time=run_time, timeout=timeout, error=''.join(error))
def stop(self):
if self._proc:
self._proc.stdin.close()
self._proc.stdout.close()
if self._proc.stderr:
self._proc.stderr.close()
# Closing stdin/stdout/stderr hangs sometimes on OS X,
# (see __init__(), above), and anyway we don't want to hang
# the harness if DRT is buggy, so we wait a couple
# seconds to give DRT a chance to clean up, but then
# force-kill the process if necessary.
timeout = time.time() + self.KILL_TIMEOUT
while self._proc.poll() is None and time.time() < timeout:
time.sleep(0.1)
if self._proc.poll() is None:
_log.warning('stopping test driver timed out, '
'killing it')
self._port._executive.kill_process(self._proc.pid)
            # FIXME: This is sometimes None. What is wrong? assert self._proc.poll() is not None
if self._proc.poll() is not None:
self._proc.wait()
self._proc = None
| gpl-2.0 | -4,713,755,449,226,358,000 | 40.754613 | 95 | 0.588043 | false |
hoover/snoop | snoop/site/settings/common.py | 1 | 1968 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'snoop',
]
MIDDLEWARE_CLASSES = [
]
ROOT_URLCONF = 'snoop.site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
WSGI_APPLICATION = 'snoop.site.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'logfile': {
'format': ('%(asctime)s %(process)d '
'%(levelname)s %(name)s %(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'loggers': {
'django.request': {
'level': 'WARNING',
'propagate': False,
'handlers': ['stderr'],
},
'snoop': {
'level': 'INFO',
'propagate': False,
'handlers': ['stderr'],
},
'': {
'level': 'WARNING',
'propagate': True,
'handlers': ['stderr'],
},
},
'handlers': {
'stderr': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'logfile',
},
},
}
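# Snoop-specific defaults follow. A value of None below presumably means the
# corresponding integration (Tika, msgconvert, 7zip, readpst, GPG) is left
# unconfigured until a deployment-specific settings module overrides it.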
SNOOP_CACHE = True
SNOOP_ELASTICSEARCH_URL = 'http://localhost:9200'
SNOOP_ELASTICSEARCH_INDEX = 'hoover'
SNOOP_ELASTICSEARCH_TIMEOUT = 10
SNOOP_ANALYZE_LANG = False
SNOOP_TIKA_MAX_FILE_SIZE = 50 * 1024 * 1024 # 50M
SNOOP_TIKA_FILE_TYPES = ['doc', 'pdf', 'xls', 'ppt']
SNOOP_TIKA_SERVER_ENDPOINT = None
SNOOP_MSGCONVERT_SCRIPT = None
SNOOP_MSG_CACHE = None
SNOOP_FLAG_MSGCONVERT_FAIL = False
SNOOP_SEVENZIP_BINARY = None
SNOOP_ARCHIVE_CACHE_ROOT = None
SNOOP_PST_CACHE_ROOT = None
SNOOP_READPST_BINARY = None
SNOOP_GPG_HOME = None
SNOOP_GPG_BINARY = None
SNOOP_LOG_DIR = None
SNOOP_FEED_PAGE_SIZE = 100
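# Example deployment override (a minimal sketch; the module layout and the
# endpoint values are assumptions, not part of this repository):
#
#   from snoop.site.settings.common import *
#
#   SNOOP_ELASTICSEARCH_URL = 'http://elasticsearch:9200'
#   SNOOP_TIKA_SERVER_ENDPOINT = 'http://tika:9998'
#   SNOOP_LOG_DIR = '/var/log/snoop'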
| mit | -8,808,912,177,232,153,000 | 19.93617 | 70 | 0.550813 | false |
yaricom/brainhash | src/experiment_cA8_dt_th_al_ah_bl_bh.py | 1 | 2023 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5 Hz, wisp, attention, 70, cA 8, the delta, theta,
alpha low, alpha high, beta low, and beta high bands, batch size = 5, and a
balanced data set.
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 8
experiment_name = 'cA_%d_dt-th-a_l-a_h-b_l-b_h' % (n_hidden) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = 5
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h'
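# The six EEG bands listed above are analyzed jointly; the 'cA' encoder
# (presumably a contractive autoencoder) compresses each record into
# n_hidden = 8 learned features.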
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | 2,742,979,014,972,936,000 | 31.629032 | 135 | 0.63915 | false |